Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/atm/fore200e.c11
-rw-r--r--drivers/atm/idt77252.c5
-rw-r--r--drivers/atm/lanai.c14
-rw-r--r--drivers/atm/nicstar.c4
-rw-r--r--drivers/firmware/iscsi_ibft.c6
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c11
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c2
-rw-r--r--drivers/isdn/capi/Kconfig16
-rw-r--r--drivers/isdn/capi/capi.c1203
-rw-r--r--drivers/isdn/capi/capidrv.c103
-rw-r--r--drivers/isdn/capi/capifs.c126
-rw-r--r--drivers/isdn/capi/capifs.h21
-rw-r--r--drivers/isdn/capi/kcapi.c817
-rw-r--r--drivers/isdn/capi/kcapi.h13
-rw-r--r--drivers/isdn/capi/kcapi_proc.c41
-rw-r--r--drivers/isdn/gigaset/capi.c75
-rw-r--r--drivers/isdn/hardware/avm/avmcard.h6
-rw-r--r--drivers/isdn/hardware/avm/b1.c54
-rw-r--r--drivers/isdn/hardware/avm/b1dma.c71
-rw-r--r--drivers/isdn/hardware/avm/b1isa.c2
-rw-r--r--drivers/isdn/hardware/avm/b1pci.c4
-rw-r--r--drivers/isdn/hardware/avm/b1pcmcia.c2
-rw-r--r--drivers/isdn/hardware/avm/c4.c53
-rw-r--r--drivers/isdn/hardware/avm/t1isa.c2
-rw-r--r--drivers/isdn/hardware/avm/t1pci.c2
-rw-r--r--drivers/isdn/hardware/eicon/capimain.c40
-rw-r--r--drivers/isdn/hardware/eicon/diva_didd.c45
-rw-r--r--drivers/isdn/hardware/eicon/divasi.c48
-rw-r--r--drivers/isdn/hardware/eicon/divasproc.c198
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c2
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNinfineon.c1
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c2
-rw-r--r--drivers/isdn/hisax/isar.c2
-rw-r--r--drivers/isdn/hysdn/hycapi.c56
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.c14
-rw-r--r--drivers/message/i2o/i2o_proc.c11
-rw-r--r--drivers/misc/iwmc3200top/fw-download.c50
-rw-r--r--drivers/misc/iwmc3200top/iwmc3200top.h4
-rw-r--r--drivers/misc/iwmc3200top/log.h31
-rw-r--r--drivers/misc/iwmc3200top/main.c59
-rw-r--r--drivers/net/3c505.c6
-rw-r--r--drivers/net/3c509.c10
-rw-r--r--drivers/net/3c523.c4
-rw-r--r--drivers/net/3c527.c11
-rw-r--r--drivers/net/3c59x.c2
-rw-r--r--drivers/net/7990.c2
-rw-r--r--drivers/net/8139cp.c9
-rw-r--r--drivers/net/8139too.c9
-rw-r--r--drivers/net/82596.c8
-rw-r--r--drivers/net/Kconfig74
-rw-r--r--drivers/net/Makefile4
-rw-r--r--drivers/net/a2065.c2
-rw-r--r--drivers/net/acenic.c4
-rw-r--r--drivers/net/amd8111e.c12
-rw-r--r--drivers/net/arcnet/com20020-pci.c2
-rw-r--r--drivers/net/ariadne.c4
-rw-r--r--drivers/net/arm/am79c961a.c2
-rw-r--r--drivers/net/arm/at91_ether.c4
-rw-r--r--drivers/net/arm/ep93xx_eth.c140
-rw-r--r--drivers/net/arm/ether3.c2
-rw-r--r--drivers/net/arm/ixp4xx_eth.c2
-rw-r--r--drivers/net/arm/ks8695net.c4
-rw-r--r--drivers/net/at1700.c6
-rw-r--r--drivers/net/atarilance.c2
-rw-r--r--drivers/net/atl1c/atl1c.h11
-rw-r--r--drivers/net/atl1c/atl1c_ethtool.c2
-rw-r--r--drivers/net/atl1c/atl1c_hw.c83
-rw-r--r--drivers/net/atl1c/atl1c_hw.h5
-rw-r--r--drivers/net/atl1c/atl1c_main.c124
-rw-r--r--drivers/net/atl1e/atl1e_hw.c23
-rw-r--r--drivers/net/atl1e/atl1e_main.c158
-rw-r--r--drivers/net/atl1e/atl1e_param.c35
-rw-r--r--drivers/net/atlx/atl1.c2
-rw-r--r--drivers/net/atlx/atl2.c2
-rw-r--r--drivers/net/atp.c7
-rw-r--r--drivers/net/au1000_eth.c4
-rw-r--r--drivers/net/b44.c6
-rw-r--r--drivers/net/bcm63xx_enet.c4
-rw-r--r--drivers/net/benet/Kconfig4
-rw-r--r--drivers/net/benet/be.h12
-rw-r--r--drivers/net/benet/be_cmds.c62
-rw-r--r--drivers/net/benet/be_cmds.h20
-rw-r--r--drivers/net/benet/be_ethtool.c63
-rw-r--r--drivers/net/benet/be_hw.h119
-rw-r--r--drivers/net/benet/be_main.c392
-rw-r--r--drivers/net/bfin_mac.c4
-rw-r--r--drivers/net/bmac.c8
-rw-r--r--drivers/net/bnx2.c193
-rw-r--r--drivers/net/bnx2.h3
-rw-r--r--drivers/net/bnx2x_main.c13
-rw-r--r--drivers/net/bonding/bond_main.c25
-rw-r--r--drivers/net/bonding/bonding.h1
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/bfin_can.c4
-rw-r--r--drivers/net/can/dev.c2
-rw-r--r--drivers/net/can/mcp251x.c426
-rw-r--r--drivers/net/can/mscan/Kconfig7
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c248
-rw-r--r--drivers/net/can/mscan/mscan.c58
-rw-r--r--drivers/net/can/mscan/mscan.h86
-rw-r--r--drivers/net/can/sja1000/Kconfig12
-rw-r--r--drivers/net/can/sja1000/Makefile1
-rw-r--r--drivers/net/can/sja1000/ems_pci.c2
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c2
-rw-r--r--drivers/net/can/sja1000/plx_pci.c472
-rw-r--r--drivers/net/can/sja1000/sja1000.c4
-rw-r--r--drivers/net/can/ti_hecc.c5
-rw-r--r--drivers/net/can/usb/Kconfig2
-rw-r--r--drivers/net/can/usb/ems_usb.c6
-rw-r--r--drivers/net/can/vcan.c12
-rw-r--r--drivers/net/cassini.c4
-rw-r--r--drivers/net/chelsio/common.h6
-rw-r--r--drivers/net/chelsio/subr.c2
-rw-r--r--drivers/net/cpmac.c2
-rw-r--r--drivers/net/cris/eth_v10.c2
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/cxgb3/sge.c16
-rw-r--r--drivers/net/cxgb3/t3_hw.c3
-rw-r--r--drivers/net/cxgb3/xgmac.c3
-rw-r--r--drivers/net/davinci_emac.c7
-rw-r--r--drivers/net/de620.c2
-rw-r--r--drivers/net/declance.c2
-rw-r--r--drivers/net/defxx.c13
-rw-r--r--drivers/net/depca.c2
-rw-r--r--drivers/net/dl2k.c6
-rw-r--r--drivers/net/dl2k.h2
-rw-r--r--drivers/net/dm9000.c2
-rw-r--r--drivers/net/e100.c8
-rw-r--r--drivers/net/e1000/e1000.h1
-rw-r--r--drivers/net/e1000/e1000_ethtool.c19
-rw-r--r--drivers/net/e1000/e1000_main.c12
-rw-r--r--drivers/net/e1000e/82571.c68
-rw-r--r--drivers/net/e1000e/defines.h2
-rw-r--r--drivers/net/e1000e/e1000.h19
-rw-r--r--drivers/net/e1000e/es2lan.c32
-rw-r--r--drivers/net/e1000e/ethtool.c2
-rw-r--r--drivers/net/e1000e/hw.h12
-rw-r--r--drivers/net/e1000e/ich8lan.c1
-rw-r--r--drivers/net/e1000e/lib.c230
-rw-r--r--drivers/net/e1000e/netdev.c45
-rw-r--r--drivers/net/eepro.c15
-rw-r--r--drivers/net/eexpress.c6
-rw-r--r--drivers/net/ehea/ehea_main.c6
-rw-r--r--drivers/net/enc28j60.c2
-rw-r--r--drivers/net/enic/enic.h5
-rw-r--r--drivers/net/enic/enic_main.c198
-rw-r--r--drivers/net/enic/enic_res.c16
-rw-r--r--drivers/net/enic/vnic_dev.c1
-rw-r--r--drivers/net/enic/vnic_enet.h5
-rw-r--r--drivers/net/enic/vnic_intr.c8
-rw-r--r--drivers/net/enic/vnic_intr.h3
-rw-r--r--drivers/net/enic/vnic_nic.h12
-rw-r--r--drivers/net/epic100.c8
-rw-r--r--drivers/net/eth16i.c2
-rw-r--r--drivers/net/ethoc.c8
-rw-r--r--drivers/net/ewrk3.c2
-rw-r--r--drivers/net/fealnx.c6
-rw-r--r--drivers/net/fec.c80
-rw-r--r--drivers/net/fec_mpc52xx.c2
-rw-r--r--drivers/net/forcedeth.c2
-rw-r--r--drivers/net/fs_enet/mac-fcc.c2
-rw-r--r--drivers/net/fs_enet/mac-fec.c2
-rw-r--r--drivers/net/fs_enet/mac-scc.c2
-rw-r--r--drivers/net/gianfar.c2
-rw-r--r--drivers/net/hamachi.c8
-rw-r--r--drivers/net/hp100.c9
-rw-r--r--drivers/net/ibm_newemac/core.c6
-rw-r--r--drivers/net/ibmveth.c5
-rw-r--r--drivers/net/igb/e1000_82575.h3
-rw-r--r--drivers/net/igb/e1000_regs.h1
-rw-r--r--drivers/net/igb/igb.h3
-rw-r--r--drivers/net/igb/igb_main.c156
-rw-r--r--drivers/net/igbvf/netdev.c21
-rw-r--r--drivers/net/ioc3-eth.c7
-rw-r--r--drivers/net/ipg.c6
-rw-r--r--drivers/net/irda/Kconfig10
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/sh_sir.c823
-rw-r--r--drivers/net/irda/via-ircc.c2
-rw-r--r--drivers/net/irda/vlsi_ir.c2
-rw-r--r--drivers/net/isa-skeleton.c5
-rw-r--r--drivers/net/iseries_veth.c4
-rw-r--r--drivers/net/ixgb/ixgb.h11
-rw-r--r--drivers/net/ixgb/ixgb_main.c97
-rw-r--r--drivers/net/ixgbe/Makefile3
-rw-r--r--drivers/net/ixgbe/ixgbe.h54
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c233
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c19
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c193
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c4
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c608
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.c479
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.h96
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c362
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.h47
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h66
-rw-r--r--drivers/net/ixgbevf/Makefile38
-rw-r--r--drivers/net/ixgbevf/defines.h292
-rw-r--r--drivers/net/ixgbevf/ethtool.c716
-rw-r--r--drivers/net/ixgbevf/ixgbevf.h318
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c3578
-rw-r--r--drivers/net/ixgbevf/mbx.c341
-rw-r--r--drivers/net/ixgbevf/mbx.h100
-rw-r--r--drivers/net/ixgbevf/regs.h85
-rw-r--r--drivers/net/ixgbevf/vf.c387
-rw-r--r--drivers/net/ixgbevf/vf.h168
-rw-r--r--drivers/net/jme.c58
-rw-r--r--drivers/net/jme.h41
-rw-r--r--drivers/net/korina.c6
-rw-r--r--drivers/net/ks8851.c4
-rw-r--r--drivers/net/ks8851_mll.c4
-rw-r--r--drivers/net/ksz884x.c7335
-rw-r--r--drivers/net/lance.c2
-rw-r--r--drivers/net/lib82596.c14
-rw-r--r--drivers/net/ll_temac_main.c6
-rw-r--r--drivers/net/lp486e.c12
-rw-r--r--drivers/net/mac8390.c632
-rw-r--r--drivers/net/macb.c33
-rw-r--r--drivers/net/mace.c2
-rw-r--r--drivers/net/macmace.c2
-rw-r--r--drivers/net/macvlan.c117
-rw-r--r--drivers/net/macvtap.c590
-rw-r--r--drivers/net/meth.c3
-rw-r--r--drivers/net/mlx4/main.c2
-rw-r--r--drivers/net/mv643xx_eth.c3
-rw-r--r--drivers/net/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/myri_sbus.c6
-rw-r--r--drivers/net/natsemi.c6
-rw-r--r--drivers/net/ne2k-pci.c2
-rw-r--r--drivers/net/netxen/Makefile2
-rw-r--r--drivers/net/netxen/netxen_nic.h8
-rw-r--r--drivers/net/netxen/netxen_nic_ctx.c2
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c2
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h5
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c29
-rw-r--r--drivers/net/netxen/netxen_nic_hw.h2
-rw-r--r--drivers/net/netxen/netxen_nic_init.c5
-rw-r--r--drivers/net/netxen/netxen_nic_main.c213
-rw-r--r--drivers/net/ni52.c2
-rw-r--r--drivers/net/ni65.c2
-rw-r--r--drivers/net/niu.c697
-rw-r--r--drivers/net/ns83820.c4
-rw-r--r--drivers/net/octeon/octeon_mgmt.c13
-rw-r--r--drivers/net/pasemi_mac.c2
-rw-r--r--drivers/net/pci-skeleton.c6
-rw-r--r--drivers/net/pcmcia/3c574_cs.c2
-rw-r--r--drivers/net/pcmcia/3c589_cs.c2
-rw-r--r--drivers/net/pcmcia/axnet_cs.c3
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c6
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c14
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c4
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c8
-rw-r--r--drivers/net/pcnet32.c504
-rw-r--r--drivers/net/phy/marvell.c38
-rw-r--r--drivers/net/phy/phy_device.c16
-rw-r--r--drivers/net/phy/smsc.c21
-rw-r--r--drivers/net/ppp_generic.c122
-rw-r--r--drivers/net/ps3_gelic_net.c2
-rw-r--r--drivers/net/ps3_gelic_wireless.c149
-rw-r--r--drivers/net/qla3xxx.c3
-rw-r--r--drivers/net/qlcnic/Makefile8
-rw-r--r--drivers/net/qlcnic/qlcnic.h1126
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c534
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c1015
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h937
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c1275
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c1541
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c2720
-rw-r--r--drivers/net/qlge/qlge.h444
-rw-r--r--drivers/net/qlge/qlge_dbg.c1183
-rw-r--r--drivers/net/qlge/qlge_ethtool.c49
-rw-r--r--drivers/net/qlge/qlge_main.c1187
-rw-r--r--drivers/net/qlge/qlge_mpi.c340
-rw-r--r--drivers/net/r6040.c13
-rw-r--r--drivers/net/r8169.c147
-rw-r--r--drivers/net/rrunner.c2
-rw-r--r--drivers/net/s2io.c10
-rw-r--r--drivers/net/sc92031.c4
-rw-r--r--drivers/net/sfc/efx.c4
-rw-r--r--drivers/net/sfc/efx.h2
-rw-r--r--drivers/net/sfc/ethtool.c10
-rw-r--r--drivers/net/sfc/falcon.c6
-rw-r--r--drivers/net/sfc/mcdi.c109
-rw-r--r--drivers/net/sfc/mcdi.h1
-rw-r--r--drivers/net/sfc/mcdi_pcol.h202
-rw-r--r--drivers/net/sfc/mcdi_phy.c36
-rw-r--r--drivers/net/sfc/mdio_10g.c24
-rw-r--r--drivers/net/sfc/mdio_10g.h3
-rw-r--r--drivers/net/sfc/net_driver.h17
-rw-r--r--drivers/net/sfc/nic.c13
-rw-r--r--drivers/net/sfc/qt202x_phy.c1
-rw-r--r--drivers/net/sfc/selftest.c42
-rw-r--r--drivers/net/sfc/selftest.h4
-rw-r--r--drivers/net/sfc/siena.c16
-rw-r--r--drivers/net/sfc/tenxpress.c2
-rw-r--r--drivers/net/sgiseeq.c2
-rw-r--r--drivers/net/sh_eth.c10
-rw-r--r--drivers/net/sis190.c6
-rw-r--r--drivers/net/sis900.c6
-rw-r--r--drivers/net/skfp/skfddi.c27
-rw-r--r--drivers/net/skge.c81
-rw-r--r--drivers/net/sky2.c662
-rw-r--r--drivers/net/sky2.h10
-rw-r--r--drivers/net/smc911x.c12
-rw-r--r--drivers/net/smc9194.c4
-rw-r--r--drivers/net/smc91x.c6
-rw-r--r--drivers/net/smsc911x.c4
-rw-r--r--drivers/net/smsc9420.c4
-rw-r--r--drivers/net/sonic.c8
-rw-r--r--drivers/net/spider_net.c2
-rw-r--r--drivers/net/starfire.c10
-rw-r--r--drivers/net/stmmac/Kconfig8
-rw-r--r--drivers/net/stmmac/Makefile5
-rw-r--r--drivers/net/stmmac/common.h279
-rw-r--r--drivers/net/stmmac/descs.h4
-rw-r--r--drivers/net/stmmac/dwmac100.c (renamed from drivers/net/stmmac/mac100.c)210
-rw-r--r--drivers/net/stmmac/dwmac100.h (renamed from drivers/net/stmmac/mac100.h)0
-rw-r--r--drivers/net/stmmac/dwmac1000.h (renamed from drivers/net/stmmac/gmac.h)18
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c245
-rw-r--r--drivers/net/stmmac/dwmac1000_dma.c (renamed from drivers/net/stmmac/gmac.c)351
-rw-r--r--drivers/net/stmmac/dwmac_dma.h107
-rw-r--r--drivers/net/stmmac/dwmac_lib.c263
-rw-r--r--drivers/net/stmmac/stmmac.h28
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c11
-rw-r--r--drivers/net/stmmac/stmmac_main.c436
-rw-r--r--drivers/net/stmmac/stmmac_mdio.c11
-rw-r--r--drivers/net/sun3_82586.c2
-rw-r--r--drivers/net/sun3lance.c2
-rw-r--r--drivers/net/sunbmac.c4
-rw-r--r--drivers/net/sundance.c8
-rw-r--r--drivers/net/sungem.c6
-rw-r--r--drivers/net/sunhme.c10
-rw-r--r--drivers/net/sunlance.c2
-rw-r--r--drivers/net/sunqe.c4
-rw-r--r--drivers/net/sunvnet.c5
-rw-r--r--drivers/net/tc35815.c8
-rw-r--r--drivers/net/tehuti.c153
-rw-r--r--drivers/net/tehuti.h30
-rw-r--r--drivers/net/tg3.c240
-rw-r--r--drivers/net/tg3.h33
-rw-r--r--drivers/net/tlan.c32
-rw-r--r--drivers/net/tlan.h3
-rw-r--r--drivers/net/tokenring/3c359.c4
-rw-r--r--drivers/net/tokenring/abyss.c2
-rw-r--r--drivers/net/tokenring/ibmtr.c2
-rw-r--r--drivers/net/tokenring/lanstreamer.c4
-rw-r--r--drivers/net/tokenring/olympic.c4
-rw-r--r--drivers/net/tokenring/tms380tr.c2
-rw-r--r--drivers/net/tokenring/tmspci.c2
-rw-r--r--drivers/net/tsi108_eth.c2
-rw-r--r--drivers/net/tulip/21142.c76
-rw-r--r--drivers/net/tulip/de2104x.c148
-rw-r--r--drivers/net/tulip/de4x5.c6
-rw-r--r--drivers/net/tulip/dmfe.c84
-rw-r--r--drivers/net/tulip/eeprom.c47
-rw-r--r--drivers/net/tulip/interrupt.c100
-rw-r--r--drivers/net/tulip/media.c74
-rw-r--r--drivers/net/tulip/pnic.c33
-rw-r--r--drivers/net/tulip/pnic2.c59
-rw-r--r--drivers/net/tulip/timer.c52
-rw-r--r--drivers/net/tulip/tulip_core.c182
-rw-r--r--drivers/net/tulip/uli526x.c60
-rw-r--r--drivers/net/tulip/winbond-840.c186
-rw-r--r--drivers/net/tulip/xircom_cb.c46
-rw-r--r--drivers/net/tun.c101
-rw-r--r--drivers/net/typhoon.c11
-rw-r--r--drivers/net/ucc_geth.c26
-rw-r--r--drivers/net/usb/asix.c12
-rw-r--r--drivers/net/usb/catc.c10
-rw-r--r--drivers/net/usb/dm9601.c10
-rw-r--r--drivers/net/usb/int51x1.c2
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/mcs7830.c252
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/rtl8150.c9
-rw-r--r--drivers/net/usb/smsc95xx.c4
-rw-r--r--drivers/net/via-rhine.c7
-rw-r--r--drivers/net/via-velocity.c14
-rw-r--r--drivers/net/virtio_net.c470
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c10
-rw-r--r--drivers/net/vxge/vxge-main.c14
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wan/pc300_drv.c2
-rw-r--r--drivers/net/wan/pc300too.c2
-rw-r--r--drivers/net/wan/pci200syn.c2
-rw-r--r--drivers/net/wan/wanxl.c2
-rw-r--r--drivers/net/wimax/i2400m/driver.c17
-rw-r--r--drivers/net/wimax/i2400m/fw.c11
-rw-r--r--drivers/net/wireless/adm8211.c27
-rw-r--r--drivers/net/wireless/airo.c38
-rw-r--r--drivers/net/wireless/at76c50x-usb.c6
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h17
-rw-r--r--drivers/net/wireless/ath/ar9170/hw.h1
-rw-r--r--drivers/net/wireless/ath/ar9170/mac.c2
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c123
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c2
-rw-r--r--drivers/net/wireless/ath/ath.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h27
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c108
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c121
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c25
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c31
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h80
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c199
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h32
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c442
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c168
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c863
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h34
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c1416
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c75
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c62
-rw-r--r--drivers/net/wireless/ath/debug.h8
-rw-r--r--drivers/net/wireless/ath/regd.c5
-rw-r--r--drivers/net/wireless/atmel_pci.c2
-rw-r--r--drivers/net/wireless/b43/Kconfig17
-rw-r--r--drivers/net/wireless/b43/Makefile2
-rw-r--r--drivers/net/wireless/b43/b43.h21
-rw-r--r--drivers/net/wireless/b43/dma.c19
-rw-r--r--drivers/net/wireless/b43/dma.h5
-rw-r--r--drivers/net/wireless/b43/main.c96
-rw-r--r--drivers/net/wireless/b43/phy_common.c45
-rw-r--r--drivers/net/wireless/b43/phy_common.h10
-rw-r--r--drivers/net/wireless/b43/phy_lp.c76
-rw-r--r--drivers/net/wireless/b43/phy_n.c3035
-rw-r--r--drivers/net/wireless/b43/phy_n.h98
-rw-r--r--drivers/net/wireless/b43/pio.c17
-rw-r--r--drivers/net/wireless/b43/pio.h45
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c744
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h100
-rw-r--r--drivers/net/wireless/b43legacy/dma.c20
-rw-r--r--drivers/net/wireless/b43legacy/dma.h10
-rw-r--r--drivers/net/wireless/b43legacy/main.c61
-rw-r--r--drivers/net/wireless/b43legacy/pio.c13
-rw-r--r--drivers/net/wireless/b43legacy/pio.h11
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c9
-rw-r--r--drivers/net/wireless/hostap/hostap_pci.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_plx.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig14
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c84
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-fh.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c118
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c59
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c339
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h63
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c333
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h46
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h60
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c1401
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h76
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h44
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c104
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c241
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.c198
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c150
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c155
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c71
-rw-r--r--drivers/net/wireless/libertas/Kconfig6
-rw-r--r--drivers/net/wireless/libertas/Makefile2
-rw-r--r--drivers/net/wireless/libertas/assoc.c95
-rw-r--r--drivers/net/wireless/libertas/cmd.c22
-rw-r--r--drivers/net/wireless/libertas/cmd.h12
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c21
-rw-r--r--drivers/net/wireless/libertas/defs.h7
-rw-r--r--drivers/net/wireless/libertas/dev.h8
-rw-r--r--drivers/net/wireless/libertas/ethtool.c2
-rw-r--r--drivers/net/wireless/libertas/if_spi.c1
-rw-r--r--drivers/net/wireless/libertas/main.c73
-rw-r--r--drivers/net/wireless/libertas/mesh.c29
-rw-r--r--drivers/net/wireless/libertas/mesh.h32
-rw-r--r--drivers/net/wireless/libertas/scan.c2
-rw-r--r--drivers/net/wireless/libertas/tx.c2
-rw-r--r--drivers/net/wireless/libertas/wext.c26
-rw-r--r--drivers/net/wireless/libertas_tf/main.c13
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c166
-rw-r--r--drivers/net/wireless/mwl8k.c2146
-rw-r--r--drivers/net/wireless/orinoco/main.c4
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/p54/main.c23
-rw-r--r--drivers/net/wireless/p54/p54.h8
-rw-r--r--drivers/net/wireless/p54/p54pci.c76
-rw-r--r--drivers/net/wireless/p54/p54pci.h6
-rw-r--r--drivers/net/wireless/p54/txrx.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c2
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c48
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig4
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c42
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c39
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c118
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c57
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c71
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h90
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h36
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c42
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c79
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c32
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c38
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180.h1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c37
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187.h2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c26
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_leds.c4
-rw-r--r--drivers/net/wireless/wl12xx/wl1251.h4
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.c69
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.h87
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_cmd.c83
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_cmd.h22
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_debugfs.c23
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_init.c5
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_init.h47
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c375
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_ps.c9
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_tx.c9
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_tx.h17
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h35
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c134
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.h37
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c8
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c67
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.h33
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_conf.h100
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_debugfs.c62
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.c20
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.c12
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c492
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.c15
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_reg.h99
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c3
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c43
-rw-r--r--drivers/net/wireless/zd1201.c7
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c10
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c14
-rw-r--r--drivers/net/xilinx_emaclite.c384
-rw-r--r--drivers/net/yellowfin.c9
-rw-r--r--drivers/pci/iov.c15
-rw-r--r--drivers/s390/net/qeth_core.h5
-rw-r--r--drivers/s390/net/qeth_core_main.c169
-rw-r--r--drivers/s390/net/qeth_core_mpc.h44
-rw-r--r--drivers/s390/net/qeth_core_sys.c14
-rw-r--r--drivers/s390/net/qeth_l2_main.c30
-rw-r--r--drivers/s390/net/qeth_l3.h2
-rw-r--r--drivers/s390/net/qeth_l3_main.c176
-rw-r--r--drivers/s390/net/qeth_l3_sys.c56
-rw-r--r--drivers/ssb/driver_mipscore.c5
-rw-r--r--drivers/staging/et131x/et131x_netdev.c10
-rw-r--r--drivers/staging/netwave/netwave_cs.c8
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211.h9
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c24
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c4
-rw-r--r--drivers/staging/slicoss/slicoss.c2
-rw-r--r--drivers/staging/vt6655/device_main.c4
-rw-r--r--drivers/staging/vt6656/main_usb.c5
-rw-r--r--drivers/staging/wavelan/wavelan.c8
-rw-r--r--drivers/staging/wavelan/wavelan_cs.c11
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c18
-rw-r--r--drivers/vhost/Kconfig11
-rw-r--r--drivers/vhost/Makefile2
-rw-r--r--drivers/vhost/net.c661
-rw-r--r--drivers/vhost/vhost.c1098
-rw-r--r--drivers/vhost/vhost.h161
-rw-r--r--drivers/virtio/virtio_ring.c25
626 files changed, 52457 insertions, 14171 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 6ee53c7a57a1..81e36596b1e9 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -106,6 +106,7 @@ obj-$(CONFIG_HID) += hid/
obj-$(CONFIG_PPC_PS3) += ps3/
obj-$(CONFIG_OF) += of/
obj-$(CONFIG_SSB) += ssb/
+obj-$(CONFIG_VHOST_NET) += vhost/
obj-$(CONFIG_VIRTIO) += virtio/
obj-$(CONFIG_VLYNQ) += vlynq/
obj-$(CONFIG_STAGING) += staging/
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index bc53fed89b1e..f7d6ebaa0418 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2064,12 +2064,10 @@ fore200e_get_esi(struct fore200e* fore200e)
return -EBUSY;
}
- printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n",
fore200e->name,
(prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
- prom->serial_number & 0xFFFF,
- prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ],
- prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]);
+ prom->serial_number & 0xFFFF, &prom->mac_addr[2]);
for (i = 0; i < ESI_LEN; i++) {
fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
@@ -2845,13 +2843,12 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
" interrupt line:\t\t%s\n"
" physical base address:\t0x%p\n"
" virtual base address:\t0x%p\n"
- " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n"
+ " factory address (ESI):\t%pM\n"
" board serial number:\t\t%d\n\n",
fore200e_irq_itoa(fore200e->irq),
(void*)fore200e->phys_base,
fore200e->virt_base,
- fore200e->esi[0], fore200e->esi[1], fore200e->esi[2],
- fore200e->esi[3], fore200e->esi[4], fore200e->esi[5],
+ fore200e->esi,
fore200e->esi[4] * 256 + fore200e->esi[5]);
return len;
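
The fore200e changes above rely on the kernel's %pM printk extension, which formats a pointer to a 6-byte buffer as a colon-separated MAC address and so replaces six separate %02x arguments. A minimal sketch of the idiom (the function and parameter names here are illustrative, not from this driver):

#include <linux/kernel.h>

static void example_print_mac(const char *name, const u8 *addr)
{
	/* %pM takes a pointer to 6 bytes and prints aa:bb:cc:dd:ee:ff */
	printk(KERN_INFO "%s: MAC address %pM\n", name, addr);
}

Passing the buffer pointer itself (as in &prom->mac_addr[2] above) is all the conversion needs.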
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index e33ae0025b12..01f36c08cb52 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3557,10 +3557,7 @@ init_card(struct atm_dev *dev)
if (tmp) {
memcpy(card->atmdev->esi, tmp->dev_addr, 6);
- printk("%s: ESI %02x:%02x:%02x:%02x:%02x:%02x\n",
- card->name, card->atmdev->esi[0], card->atmdev->esi[1],
- card->atmdev->esi[2], card->atmdev->esi[3],
- card->atmdev->esi[4], card->atmdev->esi[5]);
+ printk("%s: ESI %pM\n", card->name, card->atmdev->esi);
}
/*
* XXX: </hack>
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index cf97c34cbaf1..7fe7c324e7ef 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -998,9 +998,7 @@ static int __devinit eeprom_validate(struct lanai_dev *lanai)
(unsigned int) e[EEPROM_MAC_REV + i]);
return -EIO;
}
- DPRINTK("eeprom: MAC address = %02X:%02X:%02X:%02X:%02X:%02X\n",
- e[EEPROM_MAC + 0], e[EEPROM_MAC + 1], e[EEPROM_MAC + 2],
- e[EEPROM_MAC + 3], e[EEPROM_MAC + 4], e[EEPROM_MAC + 5]);
+ DPRINTK("eeprom: MAC address = %pM\n", &e[EEPROM_MAC]);
/* Verify serial number */
lanai->serialno = eeprom_be4(lanai, EEPROM_SERIAL);
v = eeprom_be4(lanai, EEPROM_SERIAL_REV);
@@ -2483,14 +2481,8 @@ static int lanai_proc_read(struct atm_dev *atmdev, loff_t *pos, char *page)
return sprintf(page, "revision: board=%d, pci_if=%d\n",
lanai->board_rev, (int) lanai->pci->revision);
if (left-- == 0)
- return sprintf(page, "EEPROM ESI: "
- "%02X:%02X:%02X:%02X:%02X:%02X\n",
- lanai->eeprom[EEPROM_MAC + 0],
- lanai->eeprom[EEPROM_MAC + 1],
- lanai->eeprom[EEPROM_MAC + 2],
- lanai->eeprom[EEPROM_MAC + 3],
- lanai->eeprom[EEPROM_MAC + 4],
- lanai->eeprom[EEPROM_MAC + 5]);
+ return sprintf(page, "EEPROM ESI: %pM\n",
+ &lanai->eeprom[EEPROM_MAC]);
if (left-- == 0)
return sprintf(page, "status: SOOL=%d, LOCD=%d, LED=%d, "
"GPIN=%d\n", (lanai->status & STATUS_SOOL) ? 1 : 0,
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 3da804b1627d..50838407b117 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -807,9 +807,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
}
}
- printk("nicstar%d: MAC address %02X:%02X:%02X:%02X:%02X:%02X\n", i,
- card->atmdev->esi[0], card->atmdev->esi[1], card->atmdev->esi[2],
- card->atmdev->esi[3], card->atmdev->esi[4], card->atmdev->esi[5]);
+ printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi);
card->atmdev->dev_data = card;
card->atmdev->ci_range.vpi_bits = card->vpibits;
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 051d1ebbd287..5aeb3b541c80 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -380,7 +380,6 @@ static ssize_t ibft_attr_show_nic(struct ibft_kobject *entry,
struct ibft_nic *nic = entry->nic;
void *ibft_loc = entry->header;
char *str = buf;
- char *mac;
int val;
if (!nic)
@@ -421,10 +420,7 @@ static ssize_t ibft_attr_show_nic(struct ibft_kobject *entry,
str += sprintf(str, "%d\n", nic->vlan);
break;
case ibft_eth_mac:
- mac = nic->mac;
- str += sprintf(str, "%02x:%02x:%02x:%02x:%02x:%02x\n",
- (u8)mac[0], (u8)mac[1], (u8)mac[2],
- (u8)mac[3], (u8)mac[4], (u8)mac[5]);
+ str += sprintf(str, "%pM\n", nic->mac);
break;
case ibft_eth_hostname:
str += sprintf_string(str, nic->hostname_len,
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 66b41351910a..d94388b81a40 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1371,15 +1371,8 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
tim.mac_addr = req->dst_mac;
tim.vlan_tag = ntohs(req->vlan_tag);
if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
- printk(KERN_ERR
- "%s bad dst mac %02x %02x %02x %02x %02x %02x\n",
- __func__,
- req->dst_mac[0],
- req->dst_mac[1],
- req->dst_mac[2],
- req->dst_mac[3],
- req->dst_mac[4],
- req->dst_mac[5]);
+ printk(KERN_ERR "%s bad dst mac %pM\n",
+ __func__, req->dst_mac);
goto reject;
}
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index ab1102780186..c04f8fc6fc2d 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -862,7 +862,7 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
}
nes_debug(NES_DBG_NIC_RX, "Number of MC entries = %d, Promiscous = %d, All Multicast = %d.\n",
- netdev->mc_count, !!(netdev->flags & IFF_PROMISC),
+ netdev_mc_count(netdev), !!(netdev->flags & IFF_PROMISC),
!!(netdev->flags & IFF_ALLMULTI));
if (!mc_all_on) {
multicast_addr = netdev->mc_list;
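
The nes_nic change swaps the old netdev->mc_count field for the netdev_mc_count() accessor. A hedged sketch of how a set_multicast_list handler might use it (the function name and the filter size are assumptions for illustration):

#include <linux/netdevice.h>

static void example_set_rx_mode(struct net_device *netdev)
{
	/* netdev_mc_count() returns the number of multicast addresses */
	if (netdev->flags & IFF_PROMISC) {
		/* accept everything */
	} else if ((netdev->flags & IFF_ALLMULTI) ||
		   netdev_mc_count(netdev) > 16 /* assumed HW filter size */) {
		/* accept all multicast traffic */
	} else {
		/* program the individual hardware filter entries */
	}
}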
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index b2a04755c96a..a168e8a891be 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -17,8 +17,7 @@ config CAPI_TRACE
If unsure, say Y.
config ISDN_CAPI_MIDDLEWARE
- bool "CAPI2.0 Middleware support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ bool "CAPI2.0 Middleware support"
help
This option will enhance the capabilities of the /dev/capi20
interface. It will provide a means of moving a data connection,
@@ -35,18 +34,19 @@ config ISDN_CAPI_CAPI20
Y/M here.
config ISDN_CAPI_CAPIFS_BOOL
- bool "CAPI2.0 filesystem support"
+ bool "CAPI2.0 filesystem support (DEPRECATED)"
depends on ISDN_CAPI_MIDDLEWARE && ISDN_CAPI_CAPI20
+ help
+ This option provides a special file system, similar to /dev/pts with
+ device nodes for the special ttys established by using the
+ middleware extension above.
+ You no longer need this, udev fully replaces it. This feature is
+ scheduled for removal.
config ISDN_CAPI_CAPIFS
tristate
depends on ISDN_CAPI_CAPIFS_BOOL
default ISDN_CAPI_CAPI20
- help
- This option provides a special file system, similar to /dev/pts with
- device nodes for the special ttys established by using the
- middleware extension above. If you want to use pppd with
- pppdcapiplugin to dial up to your ISP, say Y here.
config ISDN_CAPI_CAPIDRV
tristate "CAPI2.0 capidrv interface support"
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 65bf91e16a42..ee5837522f5a 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -23,16 +23,13 @@
#include <linux/smp_lock.h>
#include <linux/timer.h>
#include <linux/wait.h>
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
#include <linux/tty.h>
-#ifdef CONFIG_PPP
#include <linux/netdevice.h>
#include <linux/ppp_defs.h>
#include <linux/if_ppp.h>
-#endif /* CONFIG_PPP */
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/capi.h>
#include <linux/kernelcapi.h>
@@ -41,35 +38,29 @@
#include <linux/moduleparam.h>
#include <linux/isdn/capiutil.h>
#include <linux/isdn/capicmd.h>
-#if defined(CONFIG_ISDN_CAPI_CAPIFS) || defined(CONFIG_ISDN_CAPI_CAPIFS_MODULE)
-#include "capifs.h"
-#endif
-static char *revision = "$Revision: 1.1.2.7 $";
+#include "capifs.h"
MODULE_DESCRIPTION("CAPI4Linux: Userspace /dev/capi20 interface");
MODULE_AUTHOR("Carsten Paeth");
MODULE_LICENSE("GPL");
-#undef _DEBUG_REFCOUNT /* alloc/free and open/close debug */
#undef _DEBUG_TTYFUNCS /* call to tty_driver */
#undef _DEBUG_DATAFLOW /* data flow */
/* -------- driver information -------------------------------------- */
static struct class *capi_class;
-
static int capi_major = 68; /* allocated */
+
+module_param_named(major, capi_major, uint, 0);
+
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
-#define CAPINC_NR_PORTS 32
+#define CAPINC_NR_PORTS 32
#define CAPINC_MAX_PORTS 256
-static int capi_ttymajor = 191;
+
static int capi_ttyminors = CAPINC_NR_PORTS;
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
-module_param_named(major, capi_major, uint, 0);
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
-module_param_named(ttymajor, capi_ttymajor, uint, 0);
module_param_named(ttyminors, capi_ttyminors, uint, 0);
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
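
With this hunk, capi.c exposes the character device major unconditionally via module_param_named(), while the tty minor count stays under the middleware ifdef. A small sketch of the pattern (the module and parameter names are illustrative):

#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned int example_major = 68;
/* appears under /sys/module/<module>/parameters/ when perm is non-zero */
module_param_named(major, example_major, uint, 0);
MODULE_PARM_DESC(major, "Major device number to register");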
@@ -83,53 +74,43 @@ module_param_named(ttyminors, capi_ttyminors, uint, 0);
struct capidev;
struct capincci;
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
struct capiminor;
-struct datahandle_queue {
+struct ackqueue_entry {
struct list_head list;
u16 datahandle;
};
struct capiminor {
- struct list_head list;
- struct capincci *nccip;
+ struct kref kref;
+
unsigned int minor;
+ struct dentry *capifs_dentry;
- struct capi20_appl *ap;
- u32 ncci;
- u16 datahandle;
- u16 msgid;
+ struct capi20_appl *ap;
+ u32 ncci;
+ atomic_t datahandle;
+ atomic_t msgid;
- struct tty_struct *tty;
+ struct tty_port port;
int ttyinstop;
int ttyoutstop;
- struct sk_buff *ttyskb;
- atomic_t ttyopencount;
- struct sk_buff_head inqueue;
- int inbytes;
- struct sk_buff_head outqueue;
- int outbytes;
+ struct sk_buff_head inqueue;
+
+ struct sk_buff_head outqueue;
+ int outbytes;
+ struct sk_buff *outskb;
+ spinlock_t outlock;
/* transmit path */
struct list_head ackqueue;
int nack;
spinlock_t ackqlock;
};
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
-
-/* FIXME: The following lock is a sledgehammer-workaround to a
- * locking issue with the capiminor (and maybe other) data structure(s).
- * Access to this data is done in a racy way and crashes the machine with
- * a FritzCard DSL driver; sooner or later. This is a workaround
- * which trades scalability vs stability, so it doesn't crash the kernel anymore.
- * The correct (and scalable) fix for the issue seems to require
- * an API change to the drivers... . */
-static DEFINE_SPINLOCK(workaround_lock);
struct capincci {
- struct capincci *next;
+ struct list_head list;
u32 ncci;
struct capidev *cdev;
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
@@ -146,28 +127,28 @@ struct capidev {
struct sk_buff_head recvqueue;
wait_queue_head_t recvwait;
- struct capincci *nccis;
+ struct list_head nccis;
- struct mutex ncci_list_mtx;
+ struct mutex lock;
};
/* -------- global variables ---------------------------------------- */
-static DEFINE_RWLOCK(capidev_list_lock);
+static DEFINE_MUTEX(capidev_list_lock);
static LIST_HEAD(capidev_list);
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
-static DEFINE_RWLOCK(capiminor_list_lock);
-static LIST_HEAD(capiminor_list);
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
+static DEFINE_SPINLOCK(capiminors_lock);
+static struct capiminor **capiminors;
+
+static struct tty_driver *capinc_tty_driver;
+
/* -------- datahandles --------------------------------------------- */
-static int capincci_add_ack(struct capiminor *mp, u16 datahandle)
+static int capiminor_add_ack(struct capiminor *mp, u16 datahandle)
{
- struct datahandle_queue *n;
- unsigned long flags;
+ struct ackqueue_entry *n;
n = kmalloc(sizeof(*n), GFP_ATOMIC);
if (unlikely(!n)) {
@@ -176,253 +157,246 @@ static int capincci_add_ack(struct capiminor *mp, u16 datahandle)
}
n->datahandle = datahandle;
INIT_LIST_HEAD(&n->list);
- spin_lock_irqsave(&mp->ackqlock, flags);
+ spin_lock_bh(&mp->ackqlock);
list_add_tail(&n->list, &mp->ackqueue);
mp->nack++;
- spin_unlock_irqrestore(&mp->ackqlock, flags);
+ spin_unlock_bh(&mp->ackqlock);
return 0;
}
static int capiminor_del_ack(struct capiminor *mp, u16 datahandle)
{
- struct datahandle_queue *p, *tmp;
- unsigned long flags;
+ struct ackqueue_entry *p, *tmp;
- spin_lock_irqsave(&mp->ackqlock, flags);
+ spin_lock_bh(&mp->ackqlock);
list_for_each_entry_safe(p, tmp, &mp->ackqueue, list) {
if (p->datahandle == datahandle) {
list_del(&p->list);
- kfree(p);
mp->nack--;
- spin_unlock_irqrestore(&mp->ackqlock, flags);
+ spin_unlock_bh(&mp->ackqlock);
+ kfree(p);
return 0;
}
}
- spin_unlock_irqrestore(&mp->ackqlock, flags);
+ spin_unlock_bh(&mp->ackqlock);
return -1;
}
static void capiminor_del_all_ack(struct capiminor *mp)
{
- struct datahandle_queue *p, *tmp;
- unsigned long flags;
+ struct ackqueue_entry *p, *tmp;
- spin_lock_irqsave(&mp->ackqlock, flags);
list_for_each_entry_safe(p, tmp, &mp->ackqueue, list) {
list_del(&p->list);
kfree(p);
mp->nack--;
}
- spin_unlock_irqrestore(&mp->ackqlock, flags);
}
/* -------- struct capiminor ---------------------------------------- */
+static const struct tty_port_operations capiminor_port_ops; /* we have none */
+
static struct capiminor *capiminor_alloc(struct capi20_appl *ap, u32 ncci)
{
- struct capiminor *mp, *p;
- unsigned int minor = 0;
- unsigned long flags;
+ struct capiminor *mp;
+ struct device *dev;
+ unsigned int minor;
- mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
+ mp = kzalloc(sizeof(*mp), GFP_KERNEL);
if (!mp) {
printk(KERN_ERR "capi: can't alloc capiminor\n");
return NULL;
}
+ kref_init(&mp->kref);
+
mp->ap = ap;
mp->ncci = ncci;
- mp->msgid = 0;
- atomic_set(&mp->ttyopencount,0);
INIT_LIST_HEAD(&mp->ackqueue);
spin_lock_init(&mp->ackqlock);
skb_queue_head_init(&mp->inqueue);
skb_queue_head_init(&mp->outqueue);
+ spin_lock_init(&mp->outlock);
- /* Allocate the least unused minor number.
- */
- write_lock_irqsave(&capiminor_list_lock, flags);
- if (list_empty(&capiminor_list))
- list_add(&mp->list, &capiminor_list);
- else {
- list_for_each_entry(p, &capiminor_list, list) {
- if (p->minor > minor)
- break;
- minor++;
- }
-
- if (minor < capi_ttyminors) {
- mp->minor = minor;
- list_add(&mp->list, p->list.prev);
+ tty_port_init(&mp->port);
+ mp->port.ops = &capiminor_port_ops;
+
+ /* Allocate the least unused minor number. */
+ spin_lock(&capiminors_lock);
+ for (minor = 0; minor < capi_ttyminors; minor++)
+ if (!capiminors[minor]) {
+ capiminors[minor] = mp;
+ break;
}
- }
- write_unlock_irqrestore(&capiminor_list_lock, flags);
+ spin_unlock(&capiminors_lock);
- if (!(minor < capi_ttyminors)) {
+ if (minor == capi_ttyminors) {
printk(KERN_NOTICE "capi: out of minors\n");
- kfree(mp);
- return NULL;
+ goto err_out1;
}
+ mp->minor = minor;
+
+ dev = tty_register_device(capinc_tty_driver, minor, NULL);
+ if (IS_ERR(dev))
+ goto err_out2;
+
return mp;
+
+err_out2:
+ spin_lock(&capiminors_lock);
+ capiminors[minor] = NULL;
+ spin_unlock(&capiminors_lock);
+
+err_out1:
+ kfree(mp);
+ return NULL;
}
-static void capiminor_free(struct capiminor *mp)
+static void capiminor_destroy(struct kref *kref)
{
- unsigned long flags;
-
- write_lock_irqsave(&capiminor_list_lock, flags);
- list_del(&mp->list);
- write_unlock_irqrestore(&capiminor_list_lock, flags);
+ struct capiminor *mp = container_of(kref, struct capiminor, kref);
- kfree_skb(mp->ttyskb);
- mp->ttyskb = NULL;
+ kfree_skb(mp->outskb);
skb_queue_purge(&mp->inqueue);
skb_queue_purge(&mp->outqueue);
capiminor_del_all_ack(mp);
kfree(mp);
}
-static struct capiminor *capiminor_find(unsigned int minor)
+static struct capiminor *capiminor_get(unsigned int minor)
{
- struct list_head *l;
- struct capiminor *p = NULL;
+ struct capiminor *mp;
- read_lock(&capiminor_list_lock);
- list_for_each(l, &capiminor_list) {
- p = list_entry(l, struct capiminor, list);
- if (p->minor == minor)
- break;
- }
- read_unlock(&capiminor_list_lock);
- if (l == &capiminor_list)
- return NULL;
+ spin_lock(&capiminors_lock);
+ mp = capiminors[minor];
+ if (mp)
+ kref_get(&mp->kref);
+ spin_unlock(&capiminors_lock);
- return p;
+ return mp;
+}
+
+static inline void capiminor_put(struct capiminor *mp)
+{
+ kref_put(&mp->kref, capiminor_destroy);
+}
+
+static void capiminor_free(struct capiminor *mp)
+{
+ tty_unregister_device(capinc_tty_driver, mp->minor);
+
+ spin_lock(&capiminors_lock);
+ capiminors[mp->minor] = NULL;
+ spin_unlock(&capiminors_lock);
+
+ capiminor_put(mp);
}
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
/* -------- struct capincci ----------------------------------------- */
-static struct capincci *capincci_alloc(struct capidev *cdev, u32 ncci)
+static void capincci_alloc_minor(struct capidev *cdev, struct capincci *np)
{
- struct capincci *np, **pp;
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- struct capiminor *mp = NULL;
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
+ struct capiminor *mp;
+ dev_t device;
- np = kzalloc(sizeof(*np), GFP_ATOMIC);
- if (!np)
- return NULL;
- np->ncci = ncci;
- np->cdev = cdev;
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- mp = NULL;
- if (cdev->userflags & CAPIFLAG_HIGHJACKING)
- mp = np->minorp = capiminor_alloc(&cdev->ap, ncci);
+ if (!(cdev->userflags & CAPIFLAG_HIGHJACKING))
+ return;
+
+ mp = np->minorp = capiminor_alloc(&cdev->ap, np->ncci);
if (mp) {
- mp->nccip = np;
-#ifdef _DEBUG_REFCOUNT
- printk(KERN_DEBUG "set mp->nccip\n");
-#endif
-#if defined(CONFIG_ISDN_CAPI_CAPIFS) || defined(CONFIG_ISDN_CAPI_CAPIFS_MODULE)
- capifs_new_ncci(mp->minor, MKDEV(capi_ttymajor, mp->minor));
-#endif
+ device = MKDEV(capinc_tty_driver->major, mp->minor);
+ mp->capifs_dentry = capifs_new_ncci(mp->minor, device);
}
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
- for (pp=&cdev->nccis; *pp; pp = &(*pp)->next)
- ;
- *pp = np;
- return np;
}
-static void capincci_free(struct capidev *cdev, u32 ncci)
+static void capincci_free_minor(struct capincci *np)
{
- struct capincci *np, **pp;
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- struct capiminor *mp;
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
+ struct capiminor *mp = np->minorp;
+ struct tty_struct *tty;
- pp=&cdev->nccis;
- while (*pp) {
- np = *pp;
- if (ncci == 0xffffffff || np->ncci == ncci) {
- *pp = (*pp)->next;
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- if ((mp = np->minorp) != NULL) {
-#if defined(CONFIG_ISDN_CAPI_CAPIFS) || defined(CONFIG_ISDN_CAPI_CAPIFS_MODULE)
- capifs_free_ncci(mp->minor);
-#endif
- if (mp->tty) {
- mp->nccip = NULL;
-#ifdef _DEBUG_REFCOUNT
- printk(KERN_DEBUG "reset mp->nccip\n");
-#endif
- tty_hangup(mp->tty);
- } else {
- capiminor_free(mp);
- }
- }
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
- kfree(np);
- if (*pp == NULL) return;
- } else {
- pp = &(*pp)->next;
+ if (mp) {
+ capifs_free_ncci(mp->capifs_dentry);
+
+ tty = tty_port_tty_get(&mp->port);
+ if (tty) {
+ tty_vhangup(tty);
+ tty_kref_put(tty);
}
+
+ capiminor_free(mp);
}
}
-static struct capincci *capincci_find(struct capidev *cdev, u32 ncci)
+static inline unsigned int capincci_minor_opencount(struct capincci *np)
{
- struct capincci *p;
+ struct capiminor *mp = np->minorp;
+ unsigned int count = 0;
+ struct tty_struct *tty;
- for (p=cdev->nccis; p ; p = p->next) {
- if (p->ncci == ncci)
- break;
+ if (mp) {
+ tty = tty_port_tty_get(&mp->port);
+ if (tty) {
+ count = tty->count;
+ tty_kref_put(tty);
+ }
}
- return p;
+ return count;
}
-/* -------- struct capidev ------------------------------------------ */
+#else /* !CONFIG_ISDN_CAPI_MIDDLEWARE */
+
+static inline void
+capincci_alloc_minor(struct capidev *cdev, struct capincci *np) { }
+static inline void capincci_free_minor(struct capincci *np) { }
-static struct capidev *capidev_alloc(void)
+static inline unsigned int capincci_minor_opencount(struct capincci *np)
{
- struct capidev *cdev;
- unsigned long flags;
+ return 0;
+}
- cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
- if (!cdev)
+#endif /* !CONFIG_ISDN_CAPI_MIDDLEWARE */
+
+static struct capincci *capincci_alloc(struct capidev *cdev, u32 ncci)
+{
+ struct capincci *np;
+
+ np = kzalloc(sizeof(*np), GFP_KERNEL);
+ if (!np)
return NULL;
+ np->ncci = ncci;
+ np->cdev = cdev;
- mutex_init(&cdev->ncci_list_mtx);
- skb_queue_head_init(&cdev->recvqueue);
- init_waitqueue_head(&cdev->recvwait);
- write_lock_irqsave(&capidev_list_lock, flags);
- list_add_tail(&cdev->list, &capidev_list);
- write_unlock_irqrestore(&capidev_list_lock, flags);
- return cdev;
+ capincci_alloc_minor(cdev, np);
+
+ list_add_tail(&np->list, &cdev->nccis);
+
+ return np;
}
-static void capidev_free(struct capidev *cdev)
+static void capincci_free(struct capidev *cdev, u32 ncci)
{
- unsigned long flags;
+ struct capincci *np, *tmp;
- if (cdev->ap.applid) {
- capi20_release(&cdev->ap);
- cdev->ap.applid = 0;
- }
- skb_queue_purge(&cdev->recvqueue);
+ list_for_each_entry_safe(np, tmp, &cdev->nccis, list)
+ if (ncci == 0xffffffff || np->ncci == ncci) {
+ capincci_free_minor(np);
+ list_del(&np->list);
+ kfree(np);
+ }
+}
- mutex_lock(&cdev->ncci_list_mtx);
- capincci_free(cdev, 0xffffffff);
- mutex_unlock(&cdev->ncci_list_mtx);
+static struct capincci *capincci_find(struct capidev *cdev, u32 ncci)
+{
+ struct capincci *np;
- write_lock_irqsave(&capidev_list_lock, flags);
- list_del(&cdev->list);
- write_unlock_irqrestore(&capidev_list_lock, flags);
- kfree(cdev);
+ list_for_each_entry(np, &cdev->nccis, list)
+ if (np->ncci == ncci)
+ return np;
+ return NULL;
}
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
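
The rewritten capiminor handling above replaces the global list and the sledgehammer workaround_lock with a fixed minor array plus kref-based reference counting: capiminor_get() takes a reference under capiminors_lock, capiminor_put() drops it, and capiminor_destroy() runs once the last reference is gone. A stripped-down sketch of the same kref idiom on a hypothetical object:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
	struct kref kref;
	/* payload ... */
};

static void example_obj_release(struct kref *kref)
{
	struct example_obj *obj = container_of(kref, struct example_obj, kref);

	kfree(obj);			/* runs exactly once, after the last put */
}

static struct example_obj *example_obj_alloc(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		kref_init(&obj->kref);	/* refcount starts at 1 */
	return obj;
}

static void example_obj_put(struct example_obj *obj)
{
	kref_put(&obj->kref, example_obj_release);
}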
@@ -432,7 +406,7 @@ static struct sk_buff *
gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
{
struct sk_buff *nskb;
- nskb = alloc_skb(CAPI_DATA_B3_RESP_LEN, GFP_ATOMIC);
+ nskb = alloc_skb(CAPI_DATA_B3_RESP_LEN, GFP_KERNEL);
if (nskb) {
u16 datahandle = CAPIMSG_U16(skb->data,CAPIMSG_BASELEN+4+4+2);
unsigned char *s = skb_put(nskb, CAPI_DATA_B3_RESP_LEN);
@@ -440,7 +414,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
capimsg_setu16(s, 2, mp->ap->applid);
capimsg_setu8 (s, 4, CAPI_DATA_B3);
capimsg_setu8 (s, 5, CAPI_RESP);
- capimsg_setu16(s, 6, mp->msgid++);
+ capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
capimsg_setu32(s, 8, mp->ncci);
capimsg_setu16(s, 12, datahandle);
}
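
gen_data_b3_resp_for() can switch from GFP_ATOMIC to GFP_KERNEL because, after the locking rework, it is only reached from contexts that may sleep. A minimal sketch of building a small message skb in process context (the length handling is illustrative, not the CAPI wire format):

#include <linux/skbuff.h>
#include <linux/string.h>

static struct sk_buff *example_build_msg(size_t len)
{
	struct sk_buff *skb;
	unsigned char *p;

	/* GFP_KERNEL may sleep; GFP_ATOMIC is for interrupt/atomic context */
	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return NULL;

	p = skb_put(skb, len);		/* reserve len bytes of payload */
	memset(p, 0, len);
	return skb;
}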
@@ -449,122 +423,156 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
static int handle_recv_skb(struct capiminor *mp, struct sk_buff *skb)
{
+ unsigned int datalen = skb->len - CAPIMSG_LEN(skb->data);
+ struct tty_struct *tty;
struct sk_buff *nskb;
- int datalen;
u16 errcode, datahandle;
struct tty_ldisc *ld;
-
- datalen = skb->len - CAPIMSG_LEN(skb->data);
- if (mp->tty == NULL)
- {
+ int ret = -1;
+
+ tty = tty_port_tty_get(&mp->port);
+ if (!tty) {
#ifdef _DEBUG_DATAFLOW
printk(KERN_DEBUG "capi: currently no receiver\n");
#endif
return -1;
}
- ld = tty_ldisc_ref(mp->tty);
- if (ld == NULL)
- return -1;
+ ld = tty_ldisc_ref(tty);
+ if (!ld) {
+ /* fatal error, do not requeue */
+ ret = 0;
+ kfree_skb(skb);
+ goto deref_tty;
+ }
+
if (ld->ops->receive_buf == NULL) {
#if defined(_DEBUG_DATAFLOW) || defined(_DEBUG_TTYFUNCS)
printk(KERN_DEBUG "capi: ldisc has no receive_buf function\n");
#endif
- goto bad;
+ /* fatal error, do not requeue */
+ goto free_skb;
}
if (mp->ttyinstop) {
#if defined(_DEBUG_DATAFLOW) || defined(_DEBUG_TTYFUNCS)
printk(KERN_DEBUG "capi: recv tty throttled\n");
#endif
- goto bad;
+ goto deref_ldisc;
}
- if (mp->tty->receive_room < datalen) {
+
+ if (tty->receive_room < datalen) {
#if defined(_DEBUG_DATAFLOW) || defined(_DEBUG_TTYFUNCS)
printk(KERN_DEBUG "capi: no room in tty\n");
#endif
- goto bad;
+ goto deref_ldisc;
}
- if ((nskb = gen_data_b3_resp_for(mp, skb)) == NULL) {
+
+ nskb = gen_data_b3_resp_for(mp, skb);
+ if (!nskb) {
printk(KERN_ERR "capi: gen_data_b3_resp failed\n");
- goto bad;
+ goto deref_ldisc;
}
- datahandle = CAPIMSG_U16(skb->data,CAPIMSG_BASELEN+4);
+
+ datahandle = CAPIMSG_U16(skb->data, CAPIMSG_BASELEN + 4);
+
errcode = capi20_put_message(mp->ap, nskb);
- if (errcode != CAPI_NOERROR) {
+
+ if (errcode == CAPI_NOERROR) {
+ skb_pull(skb, CAPIMSG_LEN(skb->data));
+#ifdef _DEBUG_DATAFLOW
+ printk(KERN_DEBUG "capi: DATA_B3_RESP %u len=%d => ldisc\n",
+ datahandle, skb->len);
+#endif
+ ld->ops->receive_buf(tty, skb->data, NULL, skb->len);
+ } else {
printk(KERN_ERR "capi: send DATA_B3_RESP failed=%x\n",
errcode);
kfree_skb(nskb);
- goto bad;
+
+ if (errcode == CAPI_SENDQUEUEFULL)
+ goto deref_ldisc;
}
- (void)skb_pull(skb, CAPIMSG_LEN(skb->data));
-#ifdef _DEBUG_DATAFLOW
- printk(KERN_DEBUG "capi: DATA_B3_RESP %u len=%d => ldisc\n",
- datahandle, skb->len);
-#endif
- ld->ops->receive_buf(mp->tty, skb->data, NULL, skb->len);
+
+free_skb:
+ ret = 0;
kfree_skb(skb);
+
+deref_ldisc:
tty_ldisc_deref(ld);
- return 0;
-bad:
- tty_ldisc_deref(ld);
- return -1;
+
+deref_tty:
+ tty_kref_put(tty);
+ return ret;
}
static void handle_minor_recv(struct capiminor *mp)
{
struct sk_buff *skb;
- while ((skb = skb_dequeue(&mp->inqueue)) != NULL) {
- unsigned int len = skb->len;
- mp->inbytes -= len;
+
+ while ((skb = skb_dequeue(&mp->inqueue)) != NULL)
if (handle_recv_skb(mp, skb) < 0) {
skb_queue_head(&mp->inqueue, skb);
- mp->inbytes += len;
return;
}
- }
}
-static int handle_minor_send(struct capiminor *mp)
+static void handle_minor_send(struct capiminor *mp)
{
+ struct tty_struct *tty;
struct sk_buff *skb;
u16 len;
- int count = 0;
u16 errcode;
u16 datahandle;
- if (mp->tty && mp->ttyoutstop) {
+ tty = tty_port_tty_get(&mp->port);
+ if (!tty)
+ return;
+
+ if (mp->ttyoutstop) {
#if defined(_DEBUG_DATAFLOW) || defined(_DEBUG_TTYFUNCS)
printk(KERN_DEBUG "capi: send: tty stopped\n");
#endif
- return 0;
+ tty_kref_put(tty);
+ return;
}
- while ((skb = skb_dequeue(&mp->outqueue)) != NULL) {
- datahandle = mp->datahandle;
+ while (1) {
+ spin_lock_bh(&mp->outlock);
+ skb = __skb_dequeue(&mp->outqueue);
+ if (!skb) {
+ spin_unlock_bh(&mp->outlock);
+ break;
+ }
len = (u16)skb->len;
+ mp->outbytes -= len;
+ spin_unlock_bh(&mp->outlock);
+
+ datahandle = atomic_inc_return(&mp->datahandle);
skb_push(skb, CAPI_DATA_B3_REQ_LEN);
memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
capimsg_setu16(skb->data, 2, mp->ap->applid);
capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
capimsg_setu8 (skb->data, 5, CAPI_REQ);
- capimsg_setu16(skb->data, 6, mp->msgid++);
+ capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
capimsg_setu16(skb->data, 16, len); /* Data length */
capimsg_setu16(skb->data, 18, datahandle);
capimsg_setu16(skb->data, 20, 0); /* Flags */
- if (capincci_add_ack(mp, datahandle) < 0) {
+ if (capiminor_add_ack(mp, datahandle) < 0) {
skb_pull(skb, CAPI_DATA_B3_REQ_LEN);
- skb_queue_head(&mp->outqueue, skb);
- return count;
+
+ spin_lock_bh(&mp->outlock);
+ __skb_queue_head(&mp->outqueue, skb);
+ mp->outbytes += len;
+ spin_unlock_bh(&mp->outlock);
+
+ break;
}
errcode = capi20_put_message(mp->ap, skb);
if (errcode == CAPI_NOERROR) {
- mp->datahandle++;
- count++;
- mp->outbytes -= len;
#ifdef _DEBUG_DATAFLOW
printk(KERN_DEBUG "capi: DATA_B3_REQ %u len=%u\n",
datahandle, len);
@@ -575,16 +583,20 @@ static int handle_minor_send(struct capiminor *mp)
if (errcode == CAPI_SENDQUEUEFULL) {
skb_pull(skb, CAPI_DATA_B3_REQ_LEN);
- skb_queue_head(&mp->outqueue, skb);
+
+ spin_lock_bh(&mp->outlock);
+ __skb_queue_head(&mp->outqueue, skb);
+ mp->outbytes += len;
+ spin_unlock_bh(&mp->outlock);
+
break;
}
/* ups, drop packet */
printk(KERN_ERR "capi: put_message = %x\n", errcode);
- mp->outbytes -= len;
kfree_skb(skb);
}
- return count;
+ tty_kref_put(tty);
}
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
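
Both handle_recv_skb() and handle_minor_send() now look the tty up through the embedded tty_port instead of dereferencing a raw mp->tty pointer, which closes the race the old workaround_lock papered over. The reference pattern, sketched on its own (the function name is illustrative):

#include <linux/tty.h>

static void example_wake_writer(struct tty_port *port)
{
	/* tty_port_tty_get() returns the tty with a reference held, or NULL */
	struct tty_struct *tty = tty_port_tty_get(port);

	if (!tty)
		return;			/* nobody has the port open right now */

	tty_wakeup(tty);
	tty_kref_put(tty);		/* drop the reference taken above */
}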
@@ -594,65 +606,56 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
{
struct capidev *cdev = ap->private;
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
+ struct tty_struct *tty;
struct capiminor *mp;
u16 datahandle;
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
struct capincci *np;
- u32 ncci;
- unsigned long flags;
+
+ mutex_lock(&cdev->lock);
if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_CONF) {
u16 info = CAPIMSG_U16(skb->data, 12); // Info field
- if ((info & 0xff00) == 0) {
- mutex_lock(&cdev->ncci_list_mtx);
+ if ((info & 0xff00) == 0)
capincci_alloc(cdev, CAPIMSG_NCCI(skb->data));
- mutex_unlock(&cdev->ncci_list_mtx);
- }
}
- if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_IND) {
- mutex_lock(&cdev->ncci_list_mtx);
+ if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_IND)
capincci_alloc(cdev, CAPIMSG_NCCI(skb->data));
- mutex_unlock(&cdev->ncci_list_mtx);
- }
- spin_lock_irqsave(&workaround_lock, flags);
+
if (CAPIMSG_COMMAND(skb->data) != CAPI_DATA_B3) {
skb_queue_tail(&cdev->recvqueue, skb);
wake_up_interruptible(&cdev->recvwait);
- spin_unlock_irqrestore(&workaround_lock, flags);
- return;
+ goto unlock_out;
}
- ncci = CAPIMSG_CONTROL(skb->data);
- for (np = cdev->nccis; np && np->ncci != ncci; np = np->next)
- ;
+
+ np = capincci_find(cdev, CAPIMSG_CONTROL(skb->data));
if (!np) {
printk(KERN_ERR "BUG: capi_signal: ncci not found\n");
skb_queue_tail(&cdev->recvqueue, skb);
wake_up_interruptible(&cdev->recvwait);
- spin_unlock_irqrestore(&workaround_lock, flags);
- return;
+ goto unlock_out;
}
+
#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE
skb_queue_tail(&cdev->recvqueue, skb);
wake_up_interruptible(&cdev->recvwait);
+
#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */
+
mp = np->minorp;
if (!mp) {
skb_queue_tail(&cdev->recvqueue, skb);
wake_up_interruptible(&cdev->recvwait);
- spin_unlock_irqrestore(&workaround_lock, flags);
- return;
+ goto unlock_out;
}
-
-
if (CAPIMSG_SUBCOMMAND(skb->data) == CAPI_IND) {
-
datahandle = CAPIMSG_U16(skb->data, CAPIMSG_BASELEN+4+4+2);
#ifdef _DEBUG_DATAFLOW
printk(KERN_DEBUG "capi_signal: DATA_B3_IND %u len=%d\n",
datahandle, skb->len-CAPIMSG_LEN(skb->data));
#endif
skb_queue_tail(&mp->inqueue, skb);
- mp->inbytes += skb->len;
+
handle_minor_recv(mp);
} else if (CAPIMSG_SUBCOMMAND(skb->data) == CAPI_CONF) {
@@ -664,10 +667,13 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
CAPIMSG_U16(skb->data, CAPIMSG_BASELEN+4+2));
#endif
kfree_skb(skb);
- (void)capiminor_del_ack(mp, datahandle);
- if (mp->tty)
- tty_wakeup(mp->tty);
- (void)handle_minor_send(mp);
+ capiminor_del_ack(mp, datahandle);
+ tty = tty_port_tty_get(&mp->port);
+ if (tty) {
+ tty_wakeup(tty);
+ tty_kref_put(tty);
+ }
+ handle_minor_send(mp);
} else {
/* ups, let capi application handle it :-) */
@@ -675,7 +681,9 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
wake_up_interruptible(&cdev->recvwait);
}
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
- spin_unlock_irqrestore(&workaround_lock, flags);
+
+unlock_out:
+ mutex_unlock(&cdev->lock);
}
/* -------- file_operations for capidev ----------------------------- */
@@ -686,24 +694,19 @@ capi_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
struct capidev *cdev = (struct capidev *)file->private_data;
struct sk_buff *skb;
size_t copied;
+ int err;
if (!cdev->ap.applid)
return -ENODEV;
- if ((skb = skb_dequeue(&cdev->recvqueue)) == NULL) {
-
+ skb = skb_dequeue(&cdev->recvqueue);
+ if (!skb) {
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
-
- for (;;) {
- interruptible_sleep_on(&cdev->recvwait);
- if ((skb = skb_dequeue(&cdev->recvqueue)) != NULL)
- break;
- if (signal_pending(current))
- break;
- }
- if (skb == NULL)
- return -ERESTARTNOHAND;
+ err = wait_event_interruptible(cdev->recvwait,
+ (skb = skb_dequeue(&cdev->recvqueue)));
+ if (err)
+ return err;
}
if (skb->len > count) {
skb_queue_head(&cdev->recvqueue, skb);
@@ -753,9 +756,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);
if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
- mutex_lock(&cdev->ncci_list_mtx);
+ mutex_lock(&cdev->lock);
capincci_free(cdev, CAPIMSG_NCCI(skb->data));
- mutex_unlock(&cdev->ncci_list_mtx);
+ mutex_unlock(&cdev->lock);
}
cdev->errcode = capi20_put_message(&cdev->ap, skb);
@@ -788,30 +791,35 @@ capi_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct capidev *cdev = file->private_data;
- struct capi20_appl *ap = &cdev->ap;
capi_ioctl_struct data;
int retval = -EINVAL;
void __user *argp = (void __user *)arg;
switch (cmd) {
case CAPI_REGISTER:
- {
- if (ap->applid)
- return -EEXIST;
+ mutex_lock(&cdev->lock);
- if (copy_from_user(&cdev->ap.rparam, argp,
- sizeof(struct capi_register_params)))
- return -EFAULT;
-
- cdev->ap.private = cdev;
- cdev->ap.recv_message = capi_recv_message;
- cdev->errcode = capi20_register(ap);
- if (cdev->errcode) {
- ap->applid = 0;
- return -EIO;
- }
+ if (cdev->ap.applid) {
+ retval = -EEXIST;
+ goto register_out;
+ }
+ if (copy_from_user(&cdev->ap.rparam, argp,
+ sizeof(struct capi_register_params))) {
+ retval = -EFAULT;
+ goto register_out;
+ }
+ cdev->ap.private = cdev;
+ cdev->ap.recv_message = capi_recv_message;
+ cdev->errcode = capi20_register(&cdev->ap);
+ retval = (int)cdev->ap.applid;
+ if (cdev->errcode) {
+ cdev->ap.applid = 0;
+ retval = -EIO;
}
- return (int)ap->applid;
+
+register_out:
+ mutex_unlock(&cdev->lock);
+ return retval;
case CAPI_GET_VERSION:
{
@@ -910,101 +918,104 @@ capi_ioctl(struct inode *inode, struct file *file,
return 0;
case CAPI_SET_FLAGS:
- case CAPI_CLR_FLAGS:
- {
- unsigned userflags;
- if (copy_from_user(&userflags, argp,
- sizeof(userflags)))
- return -EFAULT;
- if (cmd == CAPI_SET_FLAGS)
- cdev->userflags |= userflags;
- else
- cdev->userflags &= ~userflags;
- }
- return 0;
+ case CAPI_CLR_FLAGS: {
+ unsigned userflags;
+
+ if (copy_from_user(&userflags, argp, sizeof(userflags)))
+ return -EFAULT;
+ mutex_lock(&cdev->lock);
+ if (cmd == CAPI_SET_FLAGS)
+ cdev->userflags |= userflags;
+ else
+ cdev->userflags &= ~userflags;
+ mutex_unlock(&cdev->lock);
+ return 0;
+ }
case CAPI_GET_FLAGS:
if (copy_to_user(argp, &cdev->userflags,
sizeof(cdev->userflags)))
return -EFAULT;
return 0;
- case CAPI_NCCI_OPENCOUNT:
- {
- struct capincci *nccip;
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- struct capiminor *mp;
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
- unsigned ncci;
- int count = 0;
- if (copy_from_user(&ncci, argp, sizeof(ncci)))
- return -EFAULT;
+ case CAPI_NCCI_OPENCOUNT: {
+ struct capincci *nccip;
+ unsigned ncci;
+ int count = 0;
- mutex_lock(&cdev->ncci_list_mtx);
- if ((nccip = capincci_find(cdev, (u32) ncci)) == NULL) {
- mutex_unlock(&cdev->ncci_list_mtx);
- return 0;
- }
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- if ((mp = nccip->minorp) != NULL) {
- count += atomic_read(&mp->ttyopencount);
- }
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
- mutex_unlock(&cdev->ncci_list_mtx);
- return count;
- }
- return 0;
+ if (copy_from_user(&ncci, argp, sizeof(ncci)))
+ return -EFAULT;
+
+ mutex_lock(&cdev->lock);
+ nccip = capincci_find(cdev, (u32)ncci);
+ if (nccip)
+ count = capincci_minor_opencount(nccip);
+ mutex_unlock(&cdev->lock);
+ return count;
+ }
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- case CAPI_NCCI_GETUNIT:
- {
- struct capincci *nccip;
- struct capiminor *mp;
- unsigned ncci;
- int unit = 0;
- if (copy_from_user(&ncci, argp,
- sizeof(ncci)))
- return -EFAULT;
- mutex_lock(&cdev->ncci_list_mtx);
- nccip = capincci_find(cdev, (u32) ncci);
- if (!nccip || (mp = nccip->minorp) == NULL) {
- mutex_unlock(&cdev->ncci_list_mtx);
- return -ESRCH;
- }
- unit = mp->minor;
- mutex_unlock(&cdev->ncci_list_mtx);
- return unit;
+ case CAPI_NCCI_GETUNIT: {
+ struct capincci *nccip;
+ struct capiminor *mp;
+ unsigned ncci;
+ int unit = -ESRCH;
+
+ if (copy_from_user(&ncci, argp, sizeof(ncci)))
+ return -EFAULT;
+
+ mutex_lock(&cdev->lock);
+ nccip = capincci_find(cdev, (u32)ncci);
+ if (nccip) {
+ mp = nccip->minorp;
+ if (mp)
+ unit = mp->minor;
}
- return 0;
+ mutex_unlock(&cdev->lock);
+ return unit;
+ }
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
+
+ default:
+ return -EINVAL;
}
- return -EINVAL;
}
-static int
-capi_open(struct inode *inode, struct file *file)
+static int capi_open(struct inode *inode, struct file *file)
{
- int ret;
-
- lock_kernel();
- if (file->private_data)
- ret = -EEXIST;
- else if ((file->private_data = capidev_alloc()) == NULL)
- ret = -ENOMEM;
- else
- ret = nonseekable_open(inode, file);
- unlock_kernel();
- return ret;
+ struct capidev *cdev;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ mutex_init(&cdev->lock);
+ skb_queue_head_init(&cdev->recvqueue);
+ init_waitqueue_head(&cdev->recvwait);
+ INIT_LIST_HEAD(&cdev->nccis);
+ file->private_data = cdev;
+
+ mutex_lock(&capidev_list_lock);
+ list_add_tail(&cdev->list, &capidev_list);
+ mutex_unlock(&capidev_list_lock);
+
+ return nonseekable_open(inode, file);
}
-static int
-capi_release(struct inode *inode, struct file *file)
+static int capi_release(struct inode *inode, struct file *file)
{
- struct capidev *cdev = (struct capidev *)file->private_data;
+ struct capidev *cdev = file->private_data;
- capidev_free(cdev);
- file->private_data = NULL;
-
+ mutex_lock(&capidev_list_lock);
+ list_del(&cdev->list);
+ mutex_unlock(&capidev_list_lock);
+
+ if (cdev->ap.applid)
+ capi20_release(&cdev->ap);
+ skb_queue_purge(&cdev->recvqueue);
+ capincci_free(cdev, 0xffffffff);
+
+ kfree(cdev);
return 0;
}
@@ -1023,182 +1034,159 @@ static const struct file_operations capi_fops =
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
/* -------- tty_operations for capincci ----------------------------- */
-static int capinc_tty_open(struct tty_struct * tty, struct file * file)
+static int
+capinc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
- struct capiminor *mp;
- unsigned long flags;
+ int idx = tty->index;
+ struct capiminor *mp = capiminor_get(idx);
+ int ret = tty_init_termios(tty);
+
+ if (ret == 0) {
+ tty_driver_kref_get(driver);
+ tty->count++;
+ tty->driver_data = mp;
+ driver->ttys[idx] = tty;
+ } else
+ capiminor_put(mp);
+ return ret;
+}
- if ((mp = capiminor_find(iminor(file->f_path.dentry->d_inode))) == NULL)
- return -ENXIO;
- if (mp->nccip == NULL)
- return -ENXIO;
+static void capinc_tty_cleanup(struct tty_struct *tty)
+{
+ struct capiminor *mp = tty->driver_data;
+ tty->driver_data = NULL;
+ capiminor_put(mp);
+}
- tty->driver_data = (void *)mp;
+static int capinc_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct capiminor *mp = tty->driver_data;
+ int err;
+
+ err = tty_port_open(&mp->port, tty, filp);
+ if (err)
+ return err;
- spin_lock_irqsave(&workaround_lock, flags);
- if (atomic_read(&mp->ttyopencount) == 0)
- mp->tty = tty;
- atomic_inc(&mp->ttyopencount);
-#ifdef _DEBUG_REFCOUNT
- printk(KERN_DEBUG "capinc_tty_open ocount=%d\n", atomic_read(&mp->ttyopencount));
-#endif
handle_minor_recv(mp);
- spin_unlock_irqrestore(&workaround_lock, flags);
return 0;
}
-static void capinc_tty_close(struct tty_struct * tty, struct file * file)
+static void capinc_tty_close(struct tty_struct *tty, struct file *filp)
{
- struct capiminor *mp;
-
- mp = (struct capiminor *)tty->driver_data;
- if (mp) {
- if (atomic_dec_and_test(&mp->ttyopencount)) {
-#ifdef _DEBUG_REFCOUNT
- printk(KERN_DEBUG "capinc_tty_close lastclose\n");
-#endif
- tty->driver_data = NULL;
- mp->tty = NULL;
- }
-#ifdef _DEBUG_REFCOUNT
- printk(KERN_DEBUG "capinc_tty_close ocount=%d\n", atomic_read(&mp->ttyopencount));
-#endif
- if (mp->nccip == NULL)
- capiminor_free(mp);
- }
+ struct capiminor *mp = tty->driver_data;
-#ifdef _DEBUG_REFCOUNT
- printk(KERN_DEBUG "capinc_tty_close\n");
-#endif
+ tty_port_close(&mp->port, tty, filp);
}
-static int capinc_tty_write(struct tty_struct * tty,
+static int capinc_tty_write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
- struct capiminor *mp = (struct capiminor *)tty->driver_data;
+ struct capiminor *mp = tty->driver_data;
struct sk_buff *skb;
- unsigned long flags;
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_write(count=%d)\n", count);
#endif
- if (!mp || !mp->nccip) {
-#ifdef _DEBUG_TTYFUNCS
- printk(KERN_DEBUG "capinc_tty_write: mp or mp->ncci NULL\n");
-#endif
- return 0;
- }
-
- spin_lock_irqsave(&workaround_lock, flags);
- skb = mp->ttyskb;
+ spin_lock_bh(&mp->outlock);
+ skb = mp->outskb;
if (skb) {
- mp->ttyskb = NULL;
- skb_queue_tail(&mp->outqueue, skb);
+ mp->outskb = NULL;
+ __skb_queue_tail(&mp->outqueue, skb);
mp->outbytes += skb->len;
}
skb = alloc_skb(CAPI_DATA_B3_REQ_LEN+count, GFP_ATOMIC);
if (!skb) {
printk(KERN_ERR "capinc_tty_write: alloc_skb failed\n");
- spin_unlock_irqrestore(&workaround_lock, flags);
+ spin_unlock_bh(&mp->outlock);
return -ENOMEM;
}
skb_reserve(skb, CAPI_DATA_B3_REQ_LEN);
memcpy(skb_put(skb, count), buf, count);
- skb_queue_tail(&mp->outqueue, skb);
+ __skb_queue_tail(&mp->outqueue, skb);
mp->outbytes += skb->len;
- (void)handle_minor_send(mp);
- (void)handle_minor_recv(mp);
- spin_unlock_irqrestore(&workaround_lock, flags);
+ spin_unlock_bh(&mp->outlock);
+
+ handle_minor_send(mp);
+
return count;
}
static int capinc_tty_put_char(struct tty_struct *tty, unsigned char ch)
{
- struct capiminor *mp = (struct capiminor *)tty->driver_data;
+ struct capiminor *mp = tty->driver_data;
+ bool invoke_send = false;
struct sk_buff *skb;
- unsigned long flags;
int ret = 1;
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_put_char(%u)\n", ch);
#endif
- if (!mp || !mp->nccip) {
-#ifdef _DEBUG_TTYFUNCS
- printk(KERN_DEBUG "capinc_tty_put_char: mp or mp->ncci NULL\n");
-#endif
- return 0;
- }
-
- spin_lock_irqsave(&workaround_lock, flags);
- skb = mp->ttyskb;
+ spin_lock_bh(&mp->outlock);
+ skb = mp->outskb;
if (skb) {
if (skb_tailroom(skb) > 0) {
*(skb_put(skb, 1)) = ch;
- spin_unlock_irqrestore(&workaround_lock, flags);
- return 1;
+ goto unlock_out;
}
- mp->ttyskb = NULL;
- skb_queue_tail(&mp->outqueue, skb);
+ mp->outskb = NULL;
+ __skb_queue_tail(&mp->outqueue, skb);
mp->outbytes += skb->len;
- (void)handle_minor_send(mp);
+ invoke_send = true;
}
+
skb = alloc_skb(CAPI_DATA_B3_REQ_LEN+CAPI_MAX_BLKSIZE, GFP_ATOMIC);
if (skb) {
skb_reserve(skb, CAPI_DATA_B3_REQ_LEN);
*(skb_put(skb, 1)) = ch;
- mp->ttyskb = skb;
+ mp->outskb = skb;
} else {
printk(KERN_ERR "capinc_put_char: char %u lost\n", ch);
ret = 0;
}
- spin_unlock_irqrestore(&workaround_lock, flags);
+
+unlock_out:
+ spin_unlock_bh(&mp->outlock);
+
+ if (invoke_send)
+ handle_minor_send(mp);
+
return ret;
}
static void capinc_tty_flush_chars(struct tty_struct *tty)
{
- struct capiminor *mp = (struct capiminor *)tty->driver_data;
+ struct capiminor *mp = tty->driver_data;
struct sk_buff *skb;
- unsigned long flags;
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_flush_chars\n");
#endif
- if (!mp || !mp->nccip) {
-#ifdef _DEBUG_TTYFUNCS
- printk(KERN_DEBUG "capinc_tty_flush_chars: mp or mp->ncci NULL\n");
-#endif
- return;
- }
-
- spin_lock_irqsave(&workaround_lock, flags);
- skb = mp->ttyskb;
+ spin_lock_bh(&mp->outlock);
+ skb = mp->outskb;
if (skb) {
- mp->ttyskb = NULL;
- skb_queue_tail(&mp->outqueue, skb);
+ mp->outskb = NULL;
+ __skb_queue_tail(&mp->outqueue, skb);
mp->outbytes += skb->len;
- (void)handle_minor_send(mp);
- }
- (void)handle_minor_recv(mp);
- spin_unlock_irqrestore(&workaround_lock, flags);
+ spin_unlock_bh(&mp->outlock);
+
+ handle_minor_send(mp);
+ } else
+ spin_unlock_bh(&mp->outlock);
+
+ handle_minor_recv(mp);
}
static int capinc_tty_write_room(struct tty_struct *tty)
{
- struct capiminor *mp = (struct capiminor *)tty->driver_data;
+ struct capiminor *mp = tty->driver_data;
int room;
- if (!mp || !mp->nccip) {
-#ifdef _DEBUG_TTYFUNCS
- printk(KERN_DEBUG "capinc_tty_write_room: mp or mp->ncci NULL\n");
-#endif
- return 0;
- }
+
room = CAPINC_MAX_SENDQUEUE-skb_queue_len(&mp->outqueue);
room *= CAPI_MAX_BLKSIZE;
#ifdef _DEBUG_TTYFUNCS
@@ -1209,13 +1197,8 @@ static int capinc_tty_write_room(struct tty_struct *tty)
static int capinc_tty_chars_in_buffer(struct tty_struct *tty)
{
- struct capiminor *mp = (struct capiminor *)tty->driver_data;
- if (!mp || !mp->nccip) {
-#ifdef _DEBUG_TTYFUNCS
- printk(KERN_DEBUG "capinc_tty_chars_in_buffer: mp or mp->ncci NULL\n");
-#endif
- return 0;
- }
+ struct capiminor *mp = tty->driver_data;
+
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_chars_in_buffer = %d nack=%d sq=%d rq=%d\n",
mp->outbytes, mp->nack,
@@ -1244,62 +1227,55 @@ static void capinc_tty_set_termios(struct tty_struct *tty, struct ktermios * old
#endif
}
-static void capinc_tty_throttle(struct tty_struct * tty)
+static void capinc_tty_throttle(struct tty_struct *tty)
{
- struct capiminor *mp = (struct capiminor *)tty->driver_data;
+ struct capiminor *mp = tty->driver_data;
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_throttle\n");
#endif
- if (mp)
- mp->ttyinstop = 1;
+ mp->ttyinstop = 1;
}
-static void capinc_tty_unthrottle(struct tty_struct * tty)
+static void capinc_tty_unthrottle(struct tty_struct *tty)
{
- struct capiminor *mp = (struct capiminor *)tty->driver_data;
- unsigned long flags;
+ struct capiminor *mp = tty->driver_data;
+
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_unthrottle\n");
#endif
- if (mp) {
- spin_lock_irqsave(&workaround_lock, flags);
- mp->ttyinstop = 0;
- handle_minor_recv(mp);
- spin_unlock_irqrestore(&workaround_lock, flags);
- }
+ mp->ttyinstop = 0;
+ handle_minor_recv(mp);
}
static void capinc_tty_stop(struct tty_struct *tty)
{
- struct capiminor *mp = (struct capiminor *)tty->driver_data;
+ struct capiminor *mp = tty->driver_data;
+
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_stop\n");
#endif
- if (mp) {
- mp->ttyoutstop = 1;
- }
+ mp->ttyoutstop = 1;
}
static void capinc_tty_start(struct tty_struct *tty)
{
- struct capiminor *mp = (struct capiminor *)tty->driver_data;
- unsigned long flags;
+ struct capiminor *mp = tty->driver_data;
+
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_start\n");
#endif
- if (mp) {
- spin_lock_irqsave(&workaround_lock, flags);
- mp->ttyoutstop = 0;
- (void)handle_minor_send(mp);
- spin_unlock_irqrestore(&workaround_lock, flags);
- }
+ mp->ttyoutstop = 0;
+ handle_minor_send(mp);
}
static void capinc_tty_hangup(struct tty_struct *tty)
{
+ struct capiminor *mp = tty->driver_data;
+
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_hangup\n");
#endif
+ tty_port_hangup(&mp->port);
}
static int capinc_tty_break_ctl(struct tty_struct *tty, int state)
@@ -1331,8 +1307,6 @@ static void capinc_tty_send_xchar(struct tty_struct *tty, char ch)
#endif
}
-static struct tty_driver *capinc_tty_driver;
-
static const struct tty_operations capinc_ops = {
.open = capinc_tty_open,
.close = capinc_tty_close,
@@ -1352,25 +1326,34 @@ static const struct tty_operations capinc_ops = {
.flush_buffer = capinc_tty_flush_buffer,
.set_ldisc = capinc_tty_set_ldisc,
.send_xchar = capinc_tty_send_xchar,
+ .install = capinc_tty_install,
+ .cleanup = capinc_tty_cleanup,
};
-static int capinc_tty_init(void)
+static int __init capinc_tty_init(void)
{
struct tty_driver *drv;
-
+ int err;
+
if (capi_ttyminors > CAPINC_MAX_PORTS)
capi_ttyminors = CAPINC_MAX_PORTS;
if (capi_ttyminors <= 0)
capi_ttyminors = CAPINC_NR_PORTS;
- drv = alloc_tty_driver(capi_ttyminors);
- if (!drv)
+ capiminors = kzalloc(sizeof(struct capiminor *) * capi_ttyminors,
+ GFP_KERNEL);
+ if (!capiminors)
return -ENOMEM;
+ drv = alloc_tty_driver(capi_ttyminors);
+ if (!drv) {
+ kfree(capiminors);
+ return -ENOMEM;
+ }
drv->owner = THIS_MODULE;
drv->driver_name = "capi_nc";
drv->name = "capi";
- drv->major = capi_ttymajor;
+ drv->major = 0;
drv->minor_start = 0;
drv->type = TTY_DRIVER_TYPE_SERIAL;
drv->subtype = SERIAL_TYPE_NORMAL;
@@ -1379,27 +1362,39 @@ static int capinc_tty_init(void)
drv->init_termios.c_oflag = OPOST | ONLCR;
drv->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
drv->init_termios.c_lflag = 0;
- drv->flags = TTY_DRIVER_REAL_RAW|TTY_DRIVER_RESET_TERMIOS;
+ drv->flags =
+ TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS |
+ TTY_DRIVER_DYNAMIC_DEV;
tty_set_operations(drv, &capinc_ops);
- if (tty_register_driver(drv)) {
+
+ err = tty_register_driver(drv);
+ if (err) {
put_tty_driver(drv);
+ kfree(capiminors);
printk(KERN_ERR "Couldn't register capi_nc driver\n");
- return -1;
+ return err;
}
capinc_tty_driver = drv;
return 0;
}
-static void capinc_tty_exit(void)
+static void __exit capinc_tty_exit(void)
{
- struct tty_driver *drv = capinc_tty_driver;
- int retval;
- if ((retval = tty_unregister_driver(drv)))
- printk(KERN_ERR "capi: failed to unregister capi_nc driver (%d)\n", retval);
- put_tty_driver(drv);
+ tty_unregister_driver(capinc_tty_driver);
+ put_tty_driver(capinc_tty_driver);
+ kfree(capiminors);
}
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
+#else /* !CONFIG_ISDN_CAPI_MIDDLEWARE */
+
+static inline int capinc_tty_init(void)
+{
+ return 0;
+}
+
+static inline void capinc_tty_exit(void) { }
+
+#endif /* !CONFIG_ISDN_CAPI_MIDDLEWARE */
/* -------- /proc functions ----------------------------------------- */
@@ -1407,134 +1402,91 @@ static void capinc_tty_exit(void)
* /proc/capi/capi20:
* minor applid nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt
*/
-static int proc_capidev_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int capi20_proc_show(struct seq_file *m, void *v)
{
struct capidev *cdev;
struct list_head *l;
- int len = 0;
- read_lock(&capidev_list_lock);
+ mutex_lock(&capidev_list_lock);
list_for_each(l, &capidev_list) {
cdev = list_entry(l, struct capidev, list);
- len += sprintf(page+len, "0 %d %lu %lu %lu %lu\n",
+ seq_printf(m, "0 %d %lu %lu %lu %lu\n",
cdev->ap.applid,
cdev->ap.nrecvctlpkt,
cdev->ap.nrecvdatapkt,
cdev->ap.nsentctlpkt,
cdev->ap.nsentdatapkt);
- if (len <= off) {
- off -= len;
- len = 0;
- } else {
- if (len-off > count)
- goto endloop;
- }
}
+ mutex_unlock(&capidev_list_lock);
+ return 0;
+}
-endloop:
- read_unlock(&capidev_list_lock);
- if (len < count)
- *eof = 1;
- if (len > count) len = count;
- if (len < 0) len = 0;
- return len;
+static int capi20_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, capi20_proc_show, NULL);
}
+static const struct file_operations capi20_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = capi20_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/*
* /proc/capi/capi20ncci:
* applid ncci
*/
-static int proc_capincci_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int capi20ncci_proc_show(struct seq_file *m, void *v)
{
- struct capidev *cdev;
- struct capincci *np;
- struct list_head *l;
- int len = 0;
+ struct capidev *cdev;
+ struct capincci *np;
- read_lock(&capidev_list_lock);
- list_for_each(l, &capidev_list) {
- cdev = list_entry(l, struct capidev, list);
- for (np=cdev->nccis; np; np = np->next) {
- len += sprintf(page+len, "%d 0x%x\n",
- cdev->ap.applid,
- np->ncci);
- if (len <= off) {
- off -= len;
- len = 0;
- } else {
- if (len-off > count)
- goto endloop;
- }
- }
+ mutex_lock(&capidev_list_lock);
+ list_for_each_entry(cdev, &capidev_list, list) {
+ mutex_lock(&cdev->lock);
+ list_for_each_entry(np, &cdev->nccis, list)
+ seq_printf(m, "%d 0x%x\n", cdev->ap.applid, np->ncci);
+ mutex_unlock(&cdev->lock);
}
-endloop:
- read_unlock(&capidev_list_lock);
- *start = page+off;
- if (len < count)
- *eof = 1;
- if (len>count) len = count;
- if (len<0) len = 0;
- return len;
+ mutex_unlock(&capidev_list_lock);
+ return 0;
+}
+
+static int capi20ncci_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, capi20ncci_proc_show, NULL);
}
-static struct procfsentries {
- char *name;
- mode_t mode;
- int (*read_proc)(char *page, char **start, off_t off,
- int count, int *eof, void *data);
- struct proc_dir_entry *procent;
-} procfsentries[] = {
- /* { "capi", S_IFDIR, 0 }, */
- { "capi/capi20", 0 , proc_capidev_read_proc },
- { "capi/capi20ncci", 0 , proc_capincci_read_proc },
+static const struct file_operations capi20ncci_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = capi20ncci_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static void __init proc_init(void)
{
- int nelem = ARRAY_SIZE(procfsentries);
- int i;
-
- for (i=0; i < nelem; i++) {
- struct procfsentries *p = procfsentries + i;
- p->procent = create_proc_entry(p->name, p->mode, NULL);
- if (p->procent) p->procent->read_proc = p->read_proc;
- }
+ proc_create("capi/capi20", 0, NULL, &capi20_proc_fops);
+ proc_create("capi/capi20ncci", 0, NULL, &capi20ncci_proc_fops);
}
static void __exit proc_exit(void)
{
- int nelem = ARRAY_SIZE(procfsentries);
- int i;
-
- for (i=nelem-1; i >= 0; i--) {
- struct procfsentries *p = procfsentries + i;
- if (p->procent) {
- remove_proc_entry(p->name, NULL);
- p->procent = NULL;
- }
- }
+ remove_proc_entry("capi/capi20", NULL);
+ remove_proc_entry("capi/capi20ncci", NULL);
}
/* -------- init function and module interface ---------------------- */
-static char rev[32];
-
static int __init capi_init(void)
{
- char *p;
- char *compileinfo;
+ const char *compileinfo;
int major_ret;
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
- strlcpy(rev, p + 2, sizeof(rev));
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p-1) = 0;
- } else
- strcpy(rev, "1.0");
-
major_ret = register_chrdev(capi_major, "capi20", &capi_fops);
if (major_ret < 0) {
printk(KERN_ERR "capi20: unable to get major %d\n", capi_major);
@@ -1548,28 +1500,24 @@ static int __init capi_init(void)
device_create(capi_class, NULL, MKDEV(capi_major, 0), NULL, "capi");
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
if (capinc_tty_init() < 0) {
device_destroy(capi_class, MKDEV(capi_major, 0));
class_destroy(capi_class);
unregister_chrdev(capi_major, "capi20");
return -ENOMEM;
}
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
proc_init();
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
#if defined(CONFIG_ISDN_CAPI_CAPIFS) || defined(CONFIG_ISDN_CAPI_CAPIFS_MODULE)
compileinfo = " (middleware+capifs)";
-#else
+#elif defined(CONFIG_ISDN_CAPI_MIDDLEWARE)
compileinfo = " (no capifs)";
-#endif
#else
compileinfo = " (no middleware)";
#endif
- printk(KERN_NOTICE "capi20: Rev %s: started up with major %d%s\n",
- rev, capi_major, compileinfo);
+ printk(KERN_NOTICE "CAPI 2.0 started up with major %d%s\n",
+ capi_major, compileinfo);
return 0;
}
@@ -1582,10 +1530,7 @@ static void __exit capi_exit(void)
class_destroy(capi_class);
unregister_chrdev(capi_major, "capi20");
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
capinc_tty_exit();
-#endif
- printk(KERN_NOTICE "capi: Rev %s: unloaded\n", rev);
}
module_init(capi_init);
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 66b7d7a86474..bf55ed5f38e3 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -24,6 +24,7 @@
#include <linux/isdn.h>
#include <linux/isdnif.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/capi.h>
#include <linux/kernelcapi.h>
#include <linux/ctype.h>
@@ -34,7 +35,6 @@
#include <linux/isdn/capicmd.h>
#include "capidrv.h"
-static char *revision = "$Revision: 1.1.2.2 $";
static int debugmode = 0;
MODULE_DESCRIPTION("CAPI4Linux: Interface to ISDN4Linux");
@@ -2210,96 +2210,73 @@ static int capidrv_delcontr(u16 contr)
}
-static void lower_callback(unsigned int cmd, u32 contr, void *data)
+static int
+lower_callback(struct notifier_block *nb, unsigned long val, void *v)
{
+ capi_profile profile;
+ u32 contr = (long)v;
- switch (cmd) {
- case KCI_CONTRUP:
+ switch (val) {
+ case CAPICTR_UP:
printk(KERN_INFO "capidrv: controller %hu up\n", contr);
- (void) capidrv_addcontr(contr, (capi_profile *) data);
+ if (capi20_get_profile(contr, &profile) == CAPI_NOERROR)
+ (void) capidrv_addcontr(contr, &profile);
break;
- case KCI_CONTRDOWN:
+ case CAPICTR_DOWN:
printk(KERN_INFO "capidrv: controller %hu down\n", contr);
(void) capidrv_delcontr(contr);
break;
}
+ return NOTIFY_OK;
}
/*
* /proc/capi/capidrv:
* nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt
*/
-static int proc_capidrv_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int capidrv_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
-
- len += sprintf(page+len, "%lu %lu %lu %lu\n",
+ seq_printf(m, "%lu %lu %lu %lu\n",
global.ap.nrecvctlpkt,
global.ap.nrecvdatapkt,
global.ap.nsentctlpkt,
global.ap.nsentdatapkt);
- if (off+count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len-off) ? count : len-off);
+ return 0;
}
-static struct procfsentries {
- char *name;
- mode_t mode;
- int (*read_proc)(char *page, char **start, off_t off,
- int count, int *eof, void *data);
- struct proc_dir_entry *procent;
-} procfsentries[] = {
- /* { "capi", S_IFDIR, 0 }, */
- { "capi/capidrv", 0 , proc_capidrv_read_proc },
+static int capidrv_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, capidrv_proc_show, NULL);
+}
+
+static const struct file_operations capidrv_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = capidrv_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static void __init proc_init(void)
{
- int nelem = ARRAY_SIZE(procfsentries);
- int i;
-
- for (i=0; i < nelem; i++) {
- struct procfsentries *p = procfsentries + i;
- p->procent = create_proc_entry(p->name, p->mode, NULL);
- if (p->procent) p->procent->read_proc = p->read_proc;
- }
+ proc_create("capi/capidrv", 0, NULL, &capidrv_proc_fops);
}
static void __exit proc_exit(void)
{
- int nelem = ARRAY_SIZE(procfsentries);
- int i;
-
- for (i=nelem-1; i >= 0; i--) {
- struct procfsentries *p = procfsentries + i;
- if (p->procent) {
- remove_proc_entry(p->name, NULL);
- p->procent = NULL;
- }
- }
+ remove_proc_entry("capi/capidrv", NULL);
}
+static struct notifier_block capictr_nb = {
+ .notifier_call = lower_callback,
+};
+
static int __init capidrv_init(void)
{
capi_profile profile;
- char rev[32];
- char *p;
u32 ncontr, contr;
u16 errcode;
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
- strncpy(rev, p + 2, sizeof(rev));
- rev[sizeof(rev)-1] = 0;
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p-1) = 0;
- } else
- strcpy(rev, "1.0");
-
global.ap.rparam.level3cnt = -2; /* number of bchannels twice */
global.ap.rparam.datablkcnt = 16;
global.ap.rparam.datablklen = 2048;
@@ -2310,7 +2287,7 @@ static int __init capidrv_init(void)
return -EIO;
}
- capi20_set_callback(&global.ap, lower_callback);
+ register_capictr_notifier(&capictr_nb);
errcode = capi20_get_profile(0, &profile);
if (errcode != CAPI_NOERROR) {
@@ -2327,29 +2304,15 @@ static int __init capidrv_init(void)
}
proc_init();
- printk(KERN_NOTICE "capidrv: Rev %s: loaded\n", rev);
return 0;
}
static void __exit capidrv_exit(void)
{
- char rev[32];
- char *p;
-
- if ((p = strchr(revision, ':')) != NULL) {
- strncpy(rev, p + 1, sizeof(rev));
- rev[sizeof(rev)-1] = 0;
- if ((p = strchr(rev, '$')) != NULL)
- *p = 0;
- } else {
- strcpy(rev, " ??? ");
- }
-
+ unregister_capictr_notifier(&capictr_nb);
capi20_release(&global.ap);
proc_exit();
-
- printk(KERN_NOTICE "capidrv: Rev%s: unloaded\n", rev);
}
module_init(capidrv_init);
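The capidrv changes above replace the old capi20_set_callback() hook with the controller notifier chain that kcapi.c introduces further down. A minimal sketch of another, purely hypothetical consumer of the same chain follows; the example_* names are invented, and the header providing CAPICTR_UP/CAPICTR_DOWN is assumed to be linux/kernelcapi.h:

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/kernelcapi.h>

static int example_capictr_event(struct notifier_block *nb,
				 unsigned long val, void *v)
{
	u32 contr = (long)v;	/* controller number is passed as the cookie */

	switch (val) {
	case CAPICTR_UP:
		printk(KERN_INFO "example: controller %u up\n", contr);
		break;
	case CAPICTR_DOWN:
		printk(KERN_INFO "example: controller %u down\n", contr);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_capictr_nb = {
	.notifier_call = example_capictr_event,
};

/* register_capictr_notifier(&example_capictr_nb) at module init,
 * unregister_capictr_notifier(&example_capictr_nb) at module exit. */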
diff --git a/drivers/isdn/capi/capifs.c b/drivers/isdn/capi/capifs.c
index 9f8f67b6c07f..8596bd1a4d26 100644
--- a/drivers/isdn/capi/capifs.c
+++ b/drivers/isdn/capi/capifs.c
@@ -25,14 +25,10 @@ MODULE_LICENSE("GPL");
/* ------------------------------------------------------------------ */
-static char *revision = "$Revision: 1.1.2.3 $";
-
-/* ------------------------------------------------------------------ */
-
#define CAPIFS_SUPER_MAGIC (('C'<<8)|'N')
static struct vfsmount *capifs_mnt;
-static struct dentry *capifs_root;
+static int capifs_mnt_count;
static struct {
int setuid;
@@ -118,7 +114,7 @@ capifs_fill_super(struct super_block *s, void *data, int silent)
inode->i_fop = &simple_dir_operations;
inode->i_nlink = 2;
- capifs_root = s->s_root = d_alloc_root(inode);
+ s->s_root = d_alloc_root(inode);
if (s->s_root)
return 0;
@@ -141,82 +137,98 @@ static struct file_system_type capifs_fs_type = {
.kill_sb = kill_anon_super,
};
-static struct dentry *get_node(int num)
+static struct dentry *new_ncci(unsigned int number, dev_t device)
{
- char s[10];
- struct dentry *root = capifs_root;
+ struct super_block *s = capifs_mnt->mnt_sb;
+ struct dentry *root = s->s_root;
+ struct dentry *dentry;
+ struct inode *inode;
+ char name[10];
+ int namelen;
+
mutex_lock(&root->d_inode->i_mutex);
- return lookup_one_len(s, root, sprintf(s, "%d", num));
-}
-void capifs_new_ncci(unsigned int number, dev_t device)
-{
- struct dentry *dentry;
- struct inode *inode = new_inode(capifs_mnt->mnt_sb);
- if (!inode)
- return;
- inode->i_ino = number+2;
+ namelen = sprintf(name, "%d", number);
+ dentry = lookup_one_len(name, root, namelen);
+ if (IS_ERR(dentry)) {
+ dentry = NULL;
+ goto unlock_out;
+ }
- dentry = get_node(number);
+ if (dentry->d_inode) {
+ dput(dentry);
+ dentry = NULL;
+ goto unlock_out;
+ }
+
+ inode = new_inode(s);
+ if (!inode) {
+ dput(dentry);
+ dentry = NULL;
+ goto unlock_out;
+ }
/* config contents are protected by root's i_mutex */
inode->i_uid = config.setuid ? config.uid : current_fsuid();
inode->i_gid = config.setgid ? config.gid : current_fsgid();
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+ inode->i_ino = number + 2;
init_special_inode(inode, S_IFCHR|config.mode, device);
- //inode->i_op = &capifs_file_inode_operations;
- if (!IS_ERR(dentry) && !dentry->d_inode)
- d_instantiate(dentry, inode);
- mutex_unlock(&capifs_root->d_inode->i_mutex);
+ d_instantiate(dentry, inode);
+ dget(dentry);
+
+unlock_out:
+ mutex_unlock(&root->d_inode->i_mutex);
+
+ return dentry;
}
-void capifs_free_ncci(unsigned int number)
+struct dentry *capifs_new_ncci(unsigned int number, dev_t device)
{
- struct dentry *dentry = get_node(number);
-
- if (!IS_ERR(dentry)) {
- struct inode *inode = dentry->d_inode;
- if (inode) {
- inode->i_nlink--;
- d_delete(dentry);
- dput(dentry);
- }
+ struct dentry *dentry;
+
+ if (simple_pin_fs(&capifs_fs_type, &capifs_mnt, &capifs_mnt_count) < 0)
+ return NULL;
+
+ dentry = new_ncci(number, device);
+ if (!dentry)
+ simple_release_fs(&capifs_mnt, &capifs_mnt_count);
+
+ return dentry;
+}
+
+void capifs_free_ncci(struct dentry *dentry)
+{
+ struct dentry *root = capifs_mnt->mnt_sb->s_root;
+ struct inode *inode;
+
+ if (!dentry)
+ return;
+
+ mutex_lock(&root->d_inode->i_mutex);
+
+ inode = dentry->d_inode;
+ if (inode) {
+ drop_nlink(inode);
+ d_delete(dentry);
dput(dentry);
}
- mutex_unlock(&capifs_root->d_inode->i_mutex);
+ dput(dentry);
+
+ mutex_unlock(&root->d_inode->i_mutex);
+
+ simple_release_fs(&capifs_mnt, &capifs_mnt_count);
}
static int __init capifs_init(void)
{
- char rev[32];
- char *p;
- int err;
-
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
- strlcpy(rev, p + 2, sizeof(rev));
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p-1) = 0;
- } else
- strcpy(rev, "1.0");
-
- err = register_filesystem(&capifs_fs_type);
- if (!err) {
- capifs_mnt = kern_mount(&capifs_fs_type);
- if (IS_ERR(capifs_mnt)) {
- err = PTR_ERR(capifs_mnt);
- unregister_filesystem(&capifs_fs_type);
- }
- }
- if (!err)
- printk(KERN_NOTICE "capifs: Rev %s\n", rev);
- return err;
+ return register_filesystem(&capifs_fs_type);
}
static void __exit capifs_exit(void)
{
unregister_filesystem(&capifs_fs_type);
- mntput(capifs_mnt);
}
EXPORT_SYMBOL(capifs_new_ncci);
diff --git a/drivers/isdn/capi/capifs.h b/drivers/isdn/capi/capifs.h
index d0bd4c3c430a..e193d1189531 100644
--- a/drivers/isdn/capi/capifs.h
+++ b/drivers/isdn/capi/capifs.h
@@ -7,5 +7,22 @@
*
*/
-void capifs_new_ncci(unsigned int num, dev_t device);
-void capifs_free_ncci(unsigned int num);
+#include <linux/dcache.h>
+
+#if defined(CONFIG_ISDN_CAPI_CAPIFS) || defined(CONFIG_ISDN_CAPI_CAPIFS_MODULE)
+
+struct dentry *capifs_new_ncci(unsigned int num, dev_t device);
+void capifs_free_ncci(struct dentry *dentry);
+
+#else
+
+static inline struct dentry *capifs_new_ncci(unsigned int num, dev_t device)
+{
+ return NULL;
+}
+
+static inline void capifs_free_ncci(struct dentry *dentry)
+{
+}
+
+#endif
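With the prototypes above, capifs callers now keep the dentry returned by capifs_new_ncci() instead of looking nodes up again by number. A hedged caller-side sketch follows; the example_ncci holder and the surrounding error handling are illustrative, the real caller being the middleware code in capi.c elsewhere in this patch:

#include <linux/dcache.h>
#include "capifs.h"

struct example_ncci {		/* hypothetical holder, not a kernel struct */
	unsigned int minor;
	struct dentry *capifs_dentry;
};

static void example_ncci_up(struct example_ncci *n, dev_t device)
{
	/* may return NULL (capifs absent or mount failed); that is not fatal */
	n->capifs_dentry = capifs_new_ncci(n->minor, device);
}

static void example_ncci_down(struct example_ncci *n)
{
	capifs_free_ncci(n->capifs_dentry);	/* NULL-safe, see above */
	n->capifs_dentry = NULL;
}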
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index dc506ab99cac..ce9b05b9e93a 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -34,10 +34,7 @@
#include <linux/b1lli.h>
#endif
#include <linux/mutex.h>
-
-static char *revision = "$Revision: 1.1.2.8 $";
-
-/* ------------------------------------------------------------- */
+#include <linux/rcupdate.h>
static int showcapimsgs = 0;
@@ -48,12 +45,10 @@ module_param(showcapimsgs, uint, 0);
/* ------------------------------------------------------------- */
-struct capi_notifier {
+struct capictr_event {
struct work_struct work;
- unsigned int cmd;
+ unsigned int type;
u32 controller;
- u16 applid;
- u32 ncci;
};
/* ------------------------------------------------------------- */
@@ -65,30 +60,31 @@ static char capi_manufakturer[64] = "AVM Berlin";
#define NCCI2CTRL(ncci) (((ncci) >> 24) & 0x7f)
LIST_HEAD(capi_drivers);
-DEFINE_RWLOCK(capi_drivers_list_lock);
+DEFINE_MUTEX(capi_drivers_lock);
-static DEFINE_RWLOCK(application_lock);
-static DEFINE_MUTEX(controller_mutex);
+struct capi_ctr *capi_controller[CAPI_MAXCONTR];
+DEFINE_MUTEX(capi_controller_lock);
struct capi20_appl *capi_applications[CAPI_MAXAPPL];
-struct capi_ctr *capi_cards[CAPI_MAXCONTR];
-static int ncards;
+static int ncontrollers;
+
+static BLOCKING_NOTIFIER_HEAD(ctr_notifier_list);
/* -------- controller ref counting -------------------------------------- */
static inline struct capi_ctr *
-capi_ctr_get(struct capi_ctr *card)
+capi_ctr_get(struct capi_ctr *ctr)
{
- if (!try_module_get(card->owner))
+ if (!try_module_get(ctr->owner))
return NULL;
- return card;
+ return ctr;
}
static inline void
-capi_ctr_put(struct capi_ctr *card)
+capi_ctr_put(struct capi_ctr *ctr)
{
- module_put(card->owner);
+ module_put(ctr->owner);
}
/* ------------------------------------------------------------- */
@@ -98,7 +94,7 @@ static inline struct capi_ctr *get_capi_ctr_by_nr(u16 contr)
if (contr - 1 >= CAPI_MAXCONTR)
return NULL;
- return capi_cards[contr - 1];
+ return capi_controller[contr - 1];
}
static inline struct capi20_appl *get_capi_appl_by_nr(u16 applid)
@@ -106,7 +102,7 @@ static inline struct capi20_appl *get_capi_appl_by_nr(u16 applid)
if (applid - 1 >= CAPI_MAXAPPL)
return NULL;
- return capi_applications[applid - 1];
+ return rcu_dereference(capi_applications[applid - 1]);
}
/* -------- util functions ------------------------------------ */
@@ -148,106 +144,159 @@ static inline int capi_subcmd_valid(u8 subcmd)
/* ------------------------------------------------------------ */
-static void register_appl(struct capi_ctr *card, u16 applid, capi_register_params *rparam)
+static void
+register_appl(struct capi_ctr *ctr, u16 applid, capi_register_params *rparam)
{
- card = capi_ctr_get(card);
+ ctr = capi_ctr_get(ctr);
- if (card)
- card->register_appl(card, applid, rparam);
+ if (ctr)
+ ctr->register_appl(ctr, applid, rparam);
else
- printk(KERN_WARNING "%s: cannot get card resources\n", __func__);
+ printk(KERN_WARNING "%s: cannot get controller resources\n",
+ __func__);
}
-static void release_appl(struct capi_ctr *card, u16 applid)
+static void release_appl(struct capi_ctr *ctr, u16 applid)
{
DBG("applid %#x", applid);
- card->release_appl(card, applid);
- capi_ctr_put(card);
+ ctr->release_appl(ctr, applid);
+ capi_ctr_put(ctr);
}
-/* -------- KCI_CONTRUP --------------------------------------- */
-
static void notify_up(u32 contr)
{
- struct capi_ctr *card = get_capi_ctr_by_nr(contr);
struct capi20_appl *ap;
+ struct capi_ctr *ctr;
u16 applid;
- if (showcapimsgs & 1) {
+ mutex_lock(&capi_controller_lock);
+
+ if (showcapimsgs & 1)
printk(KERN_DEBUG "kcapi: notify up contr %d\n", contr);
- }
- if (!card) {
+
+ ctr = get_capi_ctr_by_nr(contr);
+ if (ctr) {
+ if (ctr->state == CAPI_CTR_RUNNING)
+ goto unlock_out;
+
+ ctr->state = CAPI_CTR_RUNNING;
+
+ for (applid = 1; applid <= CAPI_MAXAPPL; applid++) {
+ ap = get_capi_appl_by_nr(applid);
+ if (!ap)
+ continue;
+ register_appl(ctr, applid, &ap->rparam);
+ }
+
+ wake_up_interruptible_all(&ctr->state_wait_queue);
+ } else
printk(KERN_WARNING "%s: invalid contr %d\n", __func__, contr);
- return;
- }
- for (applid = 1; applid <= CAPI_MAXAPPL; applid++) {
- ap = get_capi_appl_by_nr(applid);
- if (!ap || ap->release_in_progress) continue;
- register_appl(card, applid, &ap->rparam);
- if (ap->callback && !ap->release_in_progress)
- ap->callback(KCI_CONTRUP, contr, &card->profile);
- }
-}
-/* -------- KCI_CONTRDOWN ------------------------------------- */
+unlock_out:
+ mutex_unlock(&capi_controller_lock);
+}
-static void notify_down(u32 contr)
+static void ctr_down(struct capi_ctr *ctr, int new_state)
{
struct capi20_appl *ap;
u16 applid;
- if (showcapimsgs & 1) {
- printk(KERN_DEBUG "kcapi: notify down contr %d\n", contr);
- }
+ if (ctr->state == CAPI_CTR_DETECTED || ctr->state == CAPI_CTR_DETACHED)
+ return;
+
+ ctr->state = new_state;
+
+ memset(ctr->manu, 0, sizeof(ctr->manu));
+ memset(&ctr->version, 0, sizeof(ctr->version));
+ memset(&ctr->profile, 0, sizeof(ctr->profile));
+ memset(ctr->serial, 0, sizeof(ctr->serial));
for (applid = 1; applid <= CAPI_MAXAPPL; applid++) {
ap = get_capi_appl_by_nr(applid);
- if (ap && ap->callback && !ap->release_in_progress)
- ap->callback(KCI_CONTRDOWN, contr, NULL);
+ if (ap)
+ capi_ctr_put(ctr);
}
+
+ wake_up_interruptible_all(&ctr->state_wait_queue);
}
-static void notify_handler(struct work_struct *work)
+static void notify_down(u32 contr)
{
- struct capi_notifier *np =
- container_of(work, struct capi_notifier, work);
+ struct capi_ctr *ctr;
- switch (np->cmd) {
- case KCI_CONTRUP:
- notify_up(np->controller);
+ mutex_lock(&capi_controller_lock);
+
+ if (showcapimsgs & 1)
+ printk(KERN_DEBUG "kcapi: notify down contr %d\n", contr);
+
+ ctr = get_capi_ctr_by_nr(contr);
+ if (ctr)
+ ctr_down(ctr, CAPI_CTR_DETECTED);
+ else
+ printk(KERN_WARNING "%s: invalid contr %d\n", __func__, contr);
+
+ mutex_unlock(&capi_controller_lock);
+}
+
+static int
+notify_handler(struct notifier_block *nb, unsigned long val, void *v)
+{
+ u32 contr = (long)v;
+
+ switch (val) {
+ case CAPICTR_UP:
+ notify_up(contr);
break;
- case KCI_CONTRDOWN:
- notify_down(np->controller);
+ case CAPICTR_DOWN:
+ notify_down(contr);
break;
}
+ return NOTIFY_OK;
+}
+
+static void do_notify_work(struct work_struct *work)
+{
+ struct capictr_event *event =
+ container_of(work, struct capictr_event, work);
- kfree(np);
+ blocking_notifier_call_chain(&ctr_notifier_list, event->type,
+ (void *)(long)event->controller);
+ kfree(event);
}
/*
* The notifier will result in adding/deleting of devices. Devices can
* only be removed in user process context, not in bh.
*/
-static int notify_push(unsigned int cmd, u32 controller, u16 applid, u32 ncci)
+static int notify_push(unsigned int event_type, u32 controller)
{
- struct capi_notifier *np = kmalloc(sizeof(*np), GFP_ATOMIC);
+ struct capictr_event *event = kmalloc(sizeof(*event), GFP_ATOMIC);
- if (!np)
+ if (!event)
return -ENOMEM;
- INIT_WORK(&np->work, notify_handler);
- np->cmd = cmd;
- np->controller = controller;
- np->applid = applid;
- np->ncci = ncci;
+ INIT_WORK(&event->work, do_notify_work);
+ event->type = event_type;
+ event->controller = controller;
- schedule_work(&np->work);
+ schedule_work(&event->work);
return 0;
}
-
+int register_capictr_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&ctr_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(register_capictr_notifier);
+
+int unregister_capictr_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&ctr_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_capictr_notifier);
+
/* -------- Receiver ------------------------------------------ */
static void recv_handler(struct work_struct *work)
@@ -273,68 +322,70 @@ static void recv_handler(struct work_struct *work)
/**
* capi_ctr_handle_message() - handle incoming CAPI message
- * @card: controller descriptor structure.
+ * @ctr: controller descriptor structure.
* @appl: application ID.
* @skb: message.
*
* Called by hardware driver to pass a CAPI message to the application.
*/
-void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb)
+void capi_ctr_handle_message(struct capi_ctr *ctr, u16 appl,
+ struct sk_buff *skb)
{
struct capi20_appl *ap;
int showctl = 0;
u8 cmd, subcmd;
- unsigned long flags;
_cdebbuf *cdb;
- if (card->cardstate != CARD_RUNNING) {
+ if (ctr->state != CAPI_CTR_RUNNING) {
cdb = capi_message2str(skb->data);
if (cdb) {
printk(KERN_INFO "kcapi: controller [%03d] not active, got: %s",
- card->cnr, cdb->buf);
+ ctr->cnr, cdb->buf);
cdebbuf_free(cdb);
} else
printk(KERN_INFO "kcapi: controller [%03d] not active, cannot trace\n",
- card->cnr);
+ ctr->cnr);
goto error;
}
cmd = CAPIMSG_COMMAND(skb->data);
subcmd = CAPIMSG_SUBCOMMAND(skb->data);
if (cmd == CAPI_DATA_B3 && subcmd == CAPI_IND) {
- card->nrecvdatapkt++;
- if (card->traceflag > 2) showctl |= 2;
+ ctr->nrecvdatapkt++;
+ if (ctr->traceflag > 2)
+ showctl |= 2;
} else {
- card->nrecvctlpkt++;
- if (card->traceflag) showctl |= 2;
+ ctr->nrecvctlpkt++;
+ if (ctr->traceflag)
+ showctl |= 2;
}
- showctl |= (card->traceflag & 1);
+ showctl |= (ctr->traceflag & 1);
if (showctl & 2) {
if (showctl & 1) {
printk(KERN_DEBUG "kcapi: got [%03d] id#%d %s len=%u\n",
- card->cnr, CAPIMSG_APPID(skb->data),
+ ctr->cnr, CAPIMSG_APPID(skb->data),
capi_cmd2str(cmd, subcmd),
CAPIMSG_LEN(skb->data));
} else {
cdb = capi_message2str(skb->data);
if (cdb) {
printk(KERN_DEBUG "kcapi: got [%03d] %s\n",
- card->cnr, cdb->buf);
+ ctr->cnr, cdb->buf);
cdebbuf_free(cdb);
} else
printk(KERN_DEBUG "kcapi: got [%03d] id#%d %s len=%u, cannot trace\n",
- card->cnr, CAPIMSG_APPID(skb->data),
+ ctr->cnr, CAPIMSG_APPID(skb->data),
capi_cmd2str(cmd, subcmd),
CAPIMSG_LEN(skb->data));
}
}
- read_lock_irqsave(&application_lock, flags);
+ rcu_read_lock();
ap = get_capi_appl_by_nr(CAPIMSG_APPID(skb->data));
- if ((!ap) || (ap->release_in_progress)) {
- read_unlock_irqrestore(&application_lock, flags);
+ if (!ap) {
+ rcu_read_unlock();
cdb = capi_message2str(skb->data);
if (cdb) {
printk(KERN_ERR "kcapi: handle_message: applid %d state released (%s)\n",
@@ -348,7 +399,7 @@ void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *s
}
skb_queue_tail(&ap->recv_queue, skb);
schedule_work(&ap->recv_work);
- read_unlock_irqrestore(&application_lock, flags);
+ rcu_read_unlock();
return;
@@ -360,74 +411,54 @@ EXPORT_SYMBOL(capi_ctr_handle_message);
/**
* capi_ctr_ready() - signal CAPI controller ready
- * @card: controller descriptor structure.
+ * @ctr: controller descriptor structure.
*
* Called by hardware driver to signal that the controller is up and running.
*/
-void capi_ctr_ready(struct capi_ctr * card)
+void capi_ctr_ready(struct capi_ctr *ctr)
{
- card->cardstate = CARD_RUNNING;
-
- printk(KERN_NOTICE "kcapi: card [%03d] \"%s\" ready.\n",
- card->cnr, card->name);
+ printk(KERN_NOTICE "kcapi: controller [%03d] \"%s\" ready.\n",
+ ctr->cnr, ctr->name);
- notify_push(KCI_CONTRUP, card->cnr, 0, 0);
+ notify_push(CAPICTR_UP, ctr->cnr);
}
EXPORT_SYMBOL(capi_ctr_ready);
/**
* capi_ctr_down() - signal CAPI controller not ready
- * @card: controller descriptor structure.
+ * @ctr: controller descriptor structure.
*
* Called by hardware driver to signal that the controller is down and
* unavailable for use.
*/
-void capi_ctr_down(struct capi_ctr * card)
+void capi_ctr_down(struct capi_ctr *ctr)
{
- u16 appl;
-
- DBG("");
-
- if (card->cardstate == CARD_DETECTED)
- return;
-
- card->cardstate = CARD_DETECTED;
-
- memset(card->manu, 0, sizeof(card->manu));
- memset(&card->version, 0, sizeof(card->version));
- memset(&card->profile, 0, sizeof(card->profile));
- memset(card->serial, 0, sizeof(card->serial));
-
- for (appl = 1; appl <= CAPI_MAXAPPL; appl++) {
- struct capi20_appl *ap = get_capi_appl_by_nr(appl);
- if (!ap || ap->release_in_progress)
- continue;
-
- capi_ctr_put(card);
- }
-
- printk(KERN_NOTICE "kcapi: card [%03d] down.\n", card->cnr);
+ printk(KERN_NOTICE "kcapi: controller [%03d] down.\n", ctr->cnr);
- notify_push(KCI_CONTRDOWN, card->cnr, 0, 0);
+ notify_push(CAPICTR_DOWN, ctr->cnr);
}
EXPORT_SYMBOL(capi_ctr_down);
/**
* capi_ctr_suspend_output() - suspend controller
- * @card: controller descriptor structure.
+ * @ctr: controller descriptor structure.
*
* Called by hardware driver to stop data flow.
+ *
+ * Note: The caller is responsible for synchronizing concurrent state changes
+ * as well as invocations of capi_ctr_handle_message.
*/
-void capi_ctr_suspend_output(struct capi_ctr *card)
+void capi_ctr_suspend_output(struct capi_ctr *ctr)
{
- if (!card->blocked) {
- printk(KERN_DEBUG "kcapi: card [%03d] suspend\n", card->cnr);
- card->blocked = 1;
+ if (!ctr->blocked) {
+ printk(KERN_DEBUG "kcapi: controller [%03d] suspend\n",
+ ctr->cnr);
+ ctr->blocked = 1;
}
}
@@ -435,16 +466,20 @@ EXPORT_SYMBOL(capi_ctr_suspend_output);
/**
* capi_ctr_resume_output() - resume controller
- * @card: controller descriptor structure.
+ * @ctr: controller descriptor structure.
*
* Called by hardware driver to resume data flow.
+ *
+ * Note: The caller is responsible for synchronizing concurrent state changes
+ * as well as invocations of capi_ctr_handle_message.
*/
-void capi_ctr_resume_output(struct capi_ctr *card)
+void capi_ctr_resume_output(struct capi_ctr *ctr)
{
- if (card->blocked) {
- printk(KERN_DEBUG "kcapi: card [%03d] resume\n", card->cnr);
- card->blocked = 0;
+ if (ctr->blocked) {
+ printk(KERN_DEBUG "kcapi: controller [%03d] resumed\n",
+ ctr->cnr);
+ ctr->blocked = 0;
}
}
@@ -454,53 +489,48 @@ EXPORT_SYMBOL(capi_ctr_resume_output);
/**
* attach_capi_ctr() - register CAPI controller
- * @card: controller descriptor structure.
+ * @ctr: controller descriptor structure.
*
* Called by hardware driver to register a controller with the CAPI subsystem.
* Return value: 0 on success, error code < 0 on error
*/
-int
-attach_capi_ctr(struct capi_ctr *card)
+int attach_capi_ctr(struct capi_ctr *ctr)
{
int i;
- mutex_lock(&controller_mutex);
+ mutex_lock(&capi_controller_lock);
for (i = 0; i < CAPI_MAXCONTR; i++) {
- if (capi_cards[i] == NULL)
+ if (!capi_controller[i])
break;
}
if (i == CAPI_MAXCONTR) {
- mutex_unlock(&controller_mutex);
+ mutex_unlock(&capi_controller_lock);
printk(KERN_ERR "kcapi: out of controller slots\n");
return -EBUSY;
}
- capi_cards[i] = card;
-
- mutex_unlock(&controller_mutex);
-
- card->nrecvctlpkt = 0;
- card->nrecvdatapkt = 0;
- card->nsentctlpkt = 0;
- card->nsentdatapkt = 0;
- card->cnr = i + 1;
- card->cardstate = CARD_DETECTED;
- card->blocked = 0;
- card->traceflag = showcapimsgs;
-
- sprintf(card->procfn, "capi/controllers/%d", card->cnr);
- card->procent = create_proc_entry(card->procfn, 0, NULL);
- if (card->procent) {
- card->procent->read_proc =
- (int (*)(char *,char **,off_t,int,int *,void *))
- card->ctr_read_proc;
- card->procent->data = card;
- }
+ capi_controller[i] = ctr;
+
+ ctr->nrecvctlpkt = 0;
+ ctr->nrecvdatapkt = 0;
+ ctr->nsentctlpkt = 0;
+ ctr->nsentdatapkt = 0;
+ ctr->cnr = i + 1;
+ ctr->state = CAPI_CTR_DETECTED;
+ ctr->blocked = 0;
+ ctr->traceflag = showcapimsgs;
+ init_waitqueue_head(&ctr->state_wait_queue);
- ncards++;
- printk(KERN_NOTICE "kcapi: Controller [%03d]: %s attached\n",
- card->cnr, card->name);
+ sprintf(ctr->procfn, "capi/controllers/%d", ctr->cnr);
+ ctr->procent = proc_create_data(ctr->procfn, 0, NULL, ctr->proc_fops, ctr);
+
+ ncontrollers++;
+
+ mutex_unlock(&capi_controller_lock);
+
+ printk(KERN_NOTICE "kcapi: controller [%03d]: %s attached\n",
+ ctr->cnr, ctr->name);
return 0;
}
@@ -508,29 +538,38 @@ EXPORT_SYMBOL(attach_capi_ctr);
/**
* detach_capi_ctr() - unregister CAPI controller
- * @card: controller descriptor structure.
+ * @ctr: controller descriptor structure.
*
* Called by hardware driver to remove the registration of a controller
* with the CAPI subsystem.
* Return value: 0 on success, error code < 0 on error
*/
-int detach_capi_ctr(struct capi_ctr *card)
+int detach_capi_ctr(struct capi_ctr *ctr)
{
- if (card->cardstate != CARD_DETECTED)
- capi_ctr_down(card);
+ int err = 0;
- ncards--;
+ mutex_lock(&capi_controller_lock);
- if (card->procent) {
- remove_proc_entry(card->procfn, NULL);
- card->procent = NULL;
+ ctr_down(ctr, CAPI_CTR_DETACHED);
+
+ if (capi_controller[ctr->cnr - 1] != ctr) {
+ err = -EINVAL;
+ goto unlock_out;
}
- capi_cards[card->cnr - 1] = NULL;
- printk(KERN_NOTICE "kcapi: Controller [%03d]: %s unregistered\n",
- card->cnr, card->name);
+ capi_controller[ctr->cnr - 1] = NULL;
+ ncontrollers--;
- return 0;
+ if (ctr->procent)
+ remove_proc_entry(ctr->procfn, NULL);
+
+ printk(KERN_NOTICE "kcapi: controller [%03d]: %s unregistered\n",
+ ctr->cnr, ctr->name);
+
+unlock_out:
+ mutex_unlock(&capi_controller_lock);
+
+ return err;
}
EXPORT_SYMBOL(detach_capi_ctr);
@@ -544,11 +583,9 @@ EXPORT_SYMBOL(detach_capi_ctr);
void register_capi_driver(struct capi_driver *driver)
{
- unsigned long flags;
-
- write_lock_irqsave(&capi_drivers_list_lock, flags);
+ mutex_lock(&capi_drivers_lock);
list_add_tail(&driver->list, &capi_drivers);
- write_unlock_irqrestore(&capi_drivers_list_lock, flags);
+ mutex_unlock(&capi_drivers_lock);
}
EXPORT_SYMBOL(register_capi_driver);
@@ -562,11 +599,9 @@ EXPORT_SYMBOL(register_capi_driver);
void unregister_capi_driver(struct capi_driver *driver)
{
- unsigned long flags;
-
- write_lock_irqsave(&capi_drivers_list_lock, flags);
+ mutex_lock(&capi_drivers_lock);
list_del(&driver->list);
- write_unlock_irqrestore(&capi_drivers_list_lock, flags);
+ mutex_unlock(&capi_drivers_lock);
}
EXPORT_SYMBOL(unregister_capi_driver);
@@ -584,12 +619,21 @@ EXPORT_SYMBOL(unregister_capi_driver);
u16 capi20_isinstalled(void)
{
+ u16 ret = CAPI_REGNOTINSTALLED;
int i;
- for (i = 0; i < CAPI_MAXCONTR; i++) {
- if (capi_cards[i] && capi_cards[i]->cardstate == CARD_RUNNING)
- return CAPI_NOERROR;
- }
- return CAPI_REGNOTINSTALLED;
+
+ mutex_lock(&capi_controller_lock);
+
+ for (i = 0; i < CAPI_MAXCONTR; i++)
+ if (capi_controller[i] &&
+ capi_controller[i]->state == CAPI_CTR_RUNNING) {
+ ret = CAPI_NOERROR;
+ break;
+ }
+
+ mutex_unlock(&capi_controller_lock);
+
+ return ret;
}
EXPORT_SYMBOL(capi20_isinstalled);
@@ -610,46 +654,43 @@ u16 capi20_register(struct capi20_appl *ap)
{
int i;
u16 applid;
- unsigned long flags;
DBG("");
if (ap->rparam.datablklen < 128)
return CAPI_LOGBLKSIZETOSMALL;
- write_lock_irqsave(&application_lock, flags);
+ ap->nrecvctlpkt = 0;
+ ap->nrecvdatapkt = 0;
+ ap->nsentctlpkt = 0;
+ ap->nsentdatapkt = 0;
+ mutex_init(&ap->recv_mtx);
+ skb_queue_head_init(&ap->recv_queue);
+ INIT_WORK(&ap->recv_work, recv_handler);
+ ap->release_in_progress = 0;
+
+ mutex_lock(&capi_controller_lock);
for (applid = 1; applid <= CAPI_MAXAPPL; applid++) {
if (capi_applications[applid - 1] == NULL)
break;
}
if (applid > CAPI_MAXAPPL) {
- write_unlock_irqrestore(&application_lock, flags);
+ mutex_unlock(&capi_controller_lock);
return CAPI_TOOMANYAPPLS;
}
ap->applid = applid;
capi_applications[applid - 1] = ap;
- ap->nrecvctlpkt = 0;
- ap->nrecvdatapkt = 0;
- ap->nsentctlpkt = 0;
- ap->nsentdatapkt = 0;
- ap->callback = NULL;
- mutex_init(&ap->recv_mtx);
- skb_queue_head_init(&ap->recv_queue);
- INIT_WORK(&ap->recv_work, recv_handler);
- ap->release_in_progress = 0;
-
- write_unlock_irqrestore(&application_lock, flags);
-
- mutex_lock(&controller_mutex);
for (i = 0; i < CAPI_MAXCONTR; i++) {
- if (!capi_cards[i] || capi_cards[i]->cardstate != CARD_RUNNING)
+ if (!capi_controller[i] ||
+ capi_controller[i]->state != CAPI_CTR_RUNNING)
continue;
- register_appl(capi_cards[i], applid, &ap->rparam);
+ register_appl(capi_controller[i], applid, &ap->rparam);
}
- mutex_unlock(&controller_mutex);
+
+ mutex_unlock(&capi_controller_lock);
if (showcapimsgs & 1) {
printk(KERN_DEBUG "kcapi: appl %d up\n", applid);
@@ -673,22 +714,24 @@ EXPORT_SYMBOL(capi20_register);
u16 capi20_release(struct capi20_appl *ap)
{
int i;
- unsigned long flags;
DBG("applid %#x", ap->applid);
- write_lock_irqsave(&application_lock, flags);
+ mutex_lock(&capi_controller_lock);
+
ap->release_in_progress = 1;
capi_applications[ap->applid - 1] = NULL;
- write_unlock_irqrestore(&application_lock, flags);
- mutex_lock(&controller_mutex);
+ synchronize_rcu();
+
for (i = 0; i < CAPI_MAXCONTR; i++) {
- if (!capi_cards[i] || capi_cards[i]->cardstate != CARD_RUNNING)
+ if (!capi_controller[i] ||
+ capi_controller[i]->state != CAPI_CTR_RUNNING)
continue;
- release_appl(capi_cards[i], ap->applid);
+ release_appl(capi_controller[i], ap->applid);
}
- mutex_unlock(&controller_mutex);
+
+ mutex_unlock(&capi_controller_lock);
flush_scheduled_work();
skb_queue_purge(&ap->recv_queue);
@@ -713,13 +756,13 @@ EXPORT_SYMBOL(capi20_release);
u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb)
{
- struct capi_ctr *card;
+ struct capi_ctr *ctr;
int showctl = 0;
u8 cmd, subcmd;
DBG("applid %#x", ap->applid);
- if (ncards == 0)
+ if (ncontrollers == 0)
return CAPI_REGNOTINSTALLED;
if ((ap->applid == 0) || ap->release_in_progress)
return CAPI_ILLAPPNR;
@@ -727,28 +770,33 @@ u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb)
|| !capi_cmd_valid(CAPIMSG_COMMAND(skb->data))
|| !capi_subcmd_valid(CAPIMSG_SUBCOMMAND(skb->data)))
return CAPI_ILLCMDORSUBCMDORMSGTOSMALL;
- card = get_capi_ctr_by_nr(CAPIMSG_CONTROLLER(skb->data));
- if (!card || card->cardstate != CARD_RUNNING) {
- card = get_capi_ctr_by_nr(1); // XXX why?
- if (!card || card->cardstate != CARD_RUNNING)
- return CAPI_REGNOTINSTALLED;
- }
- if (card->blocked)
+
+ /*
+ * The controller reference is protected by the existence of the
+ * application passed to us. We assume that the caller properly
+ * synchronizes this service with capi20_release.
+ */
+ ctr = get_capi_ctr_by_nr(CAPIMSG_CONTROLLER(skb->data));
+ if (!ctr || ctr->state != CAPI_CTR_RUNNING)
+ return CAPI_REGNOTINSTALLED;
+ if (ctr->blocked)
return CAPI_SENDQUEUEFULL;
cmd = CAPIMSG_COMMAND(skb->data);
subcmd = CAPIMSG_SUBCOMMAND(skb->data);
if (cmd == CAPI_DATA_B3 && subcmd == CAPI_REQ) {
- card->nsentdatapkt++;
+ ctr->nsentdatapkt++;
ap->nsentdatapkt++;
- if (card->traceflag > 2) showctl |= 2;
+ if (ctr->traceflag > 2)
+ showctl |= 2;
} else {
- card->nsentctlpkt++;
+ ctr->nsentctlpkt++;
ap->nsentctlpkt++;
- if (card->traceflag) showctl |= 2;
+ if (ctr->traceflag)
+ showctl |= 2;
}
- showctl |= (card->traceflag & 1);
+ showctl |= (ctr->traceflag & 1);
if (showctl & 2) {
if (showctl & 1) {
printk(KERN_DEBUG "kcapi: put [%03d] id#%d %s len=%u\n",
@@ -771,7 +819,7 @@ u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb)
CAPIMSG_LEN(skb->data));
}
}
- return card->send_message(card, skb);
+ return ctr->send_message(ctr, skb);
}
EXPORT_SYMBOL(capi20_put_message);
@@ -788,17 +836,25 @@ EXPORT_SYMBOL(capi20_put_message);
u16 capi20_get_manufacturer(u32 contr, u8 *buf)
{
- struct capi_ctr *card;
+ struct capi_ctr *ctr;
+ u16 ret;
if (contr == 0) {
strlcpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
return CAPI_NOERROR;
}
- card = get_capi_ctr_by_nr(contr);
- if (!card || card->cardstate != CARD_RUNNING)
- return CAPI_REGNOTINSTALLED;
- strlcpy(buf, card->manu, CAPI_MANUFACTURER_LEN);
- return CAPI_NOERROR;
+
+ mutex_lock(&capi_controller_lock);
+
+ ctr = get_capi_ctr_by_nr(contr);
+ if (ctr && ctr->state == CAPI_CTR_RUNNING) {
+ strlcpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
+ ret = CAPI_NOERROR;
+ } else
+ ret = CAPI_REGNOTINSTALLED;
+
+ mutex_unlock(&capi_controller_lock);
+ return ret;
}
EXPORT_SYMBOL(capi20_get_manufacturer);
@@ -815,18 +871,25 @@ EXPORT_SYMBOL(capi20_get_manufacturer);
u16 capi20_get_version(u32 contr, struct capi_version *verp)
{
- struct capi_ctr *card;
+ struct capi_ctr *ctr;
+ u16 ret;
if (contr == 0) {
*verp = driver_version;
return CAPI_NOERROR;
}
- card = get_capi_ctr_by_nr(contr);
- if (!card || card->cardstate != CARD_RUNNING)
- return CAPI_REGNOTINSTALLED;
- memcpy((void *) verp, &card->version, sizeof(capi_version));
- return CAPI_NOERROR;
+ mutex_lock(&capi_controller_lock);
+
+ ctr = get_capi_ctr_by_nr(contr);
+ if (ctr && ctr->state == CAPI_CTR_RUNNING) {
+ memcpy(verp, &ctr->version, sizeof(capi_version));
+ ret = CAPI_NOERROR;
+ } else
+ ret = CAPI_REGNOTINSTALLED;
+
+ mutex_unlock(&capi_controller_lock);
+ return ret;
}
EXPORT_SYMBOL(capi20_get_version);
@@ -843,18 +906,25 @@ EXPORT_SYMBOL(capi20_get_version);
u16 capi20_get_serial(u32 contr, u8 *serial)
{
- struct capi_ctr *card;
+ struct capi_ctr *ctr;
+ u16 ret;
if (contr == 0) {
strlcpy(serial, driver_serial, CAPI_SERIAL_LEN);
return CAPI_NOERROR;
}
- card = get_capi_ctr_by_nr(contr);
- if (!card || card->cardstate != CARD_RUNNING)
- return CAPI_REGNOTINSTALLED;
- strlcpy((void *) serial, card->serial, CAPI_SERIAL_LEN);
- return CAPI_NOERROR;
+ mutex_lock(&capi_controller_lock);
+
+ ctr = get_capi_ctr_by_nr(contr);
+ if (ctr && ctr->state == CAPI_CTR_RUNNING) {
+ strlcpy(serial, ctr->serial, CAPI_SERIAL_LEN);
+ ret = CAPI_NOERROR;
+ } else
+ ret = CAPI_REGNOTINSTALLED;
+
+ mutex_unlock(&capi_controller_lock);
+ return ret;
}
EXPORT_SYMBOL(capi20_get_serial);
@@ -871,23 +941,65 @@ EXPORT_SYMBOL(capi20_get_serial);
u16 capi20_get_profile(u32 contr, struct capi_profile *profp)
{
- struct capi_ctr *card;
+ struct capi_ctr *ctr;
+ u16 ret;
if (contr == 0) {
- profp->ncontroller = ncards;
+ profp->ncontroller = ncontrollers;
return CAPI_NOERROR;
}
- card = get_capi_ctr_by_nr(contr);
- if (!card || card->cardstate != CARD_RUNNING)
- return CAPI_REGNOTINSTALLED;
- memcpy((void *) profp, &card->profile,
- sizeof(struct capi_profile));
- return CAPI_NOERROR;
+ mutex_lock(&capi_controller_lock);
+
+ ctr = get_capi_ctr_by_nr(contr);
+ if (ctr && ctr->state == CAPI_CTR_RUNNING) {
+ memcpy(profp, &ctr->profile, sizeof(struct capi_profile));
+ ret = CAPI_NOERROR;
+ } else
+ ret = CAPI_REGNOTINSTALLED;
+
+ mutex_unlock(&capi_controller_lock);
+ return ret;
}
EXPORT_SYMBOL(capi20_get_profile);
+/* Must be called with capi_controller_lock held. */
+static int wait_on_ctr_state(struct capi_ctr *ctr, unsigned int state)
+{
+ DEFINE_WAIT(wait);
+ int retval = 0;
+
+ ctr = capi_ctr_get(ctr);
+ if (!ctr)
+ return -ESRCH;
+
+ for (;;) {
+ prepare_to_wait(&ctr->state_wait_queue, &wait,
+ TASK_INTERRUPTIBLE);
+
+ if (ctr->state == state)
+ break;
+ if (ctr->state == CAPI_CTR_DETACHED) {
+ retval = -ESRCH;
+ break;
+ }
+ if (signal_pending(current)) {
+ retval = -EINTR;
+ break;
+ }
+
+ mutex_unlock(&capi_controller_lock);
+ schedule();
+ mutex_lock(&capi_controller_lock);
+ }
+ finish_wait(&ctr->state_wait_queue, &wait);
+
+ capi_ctr_put(ctr);
+
+ return retval;
+}
+
#ifdef AVMB1_COMPAT
static int old_capi_manufacturer(unsigned int cmd, void __user *data)
{
@@ -895,11 +1007,10 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
avmb1_extcarddef cdef;
avmb1_resetdef rdef;
capicardparams cparams;
- struct capi_ctr *card;
+ struct capi_ctr *ctr;
struct capi_driver *driver = NULL;
capiloaddata ldata;
struct list_head *l;
- unsigned long flags;
int retval;
switch (cmd) {
@@ -919,7 +1030,8 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
cparams.irq = cdef.irq;
cparams.cardnr = cdef.cardnr;
- read_lock_irqsave(&capi_drivers_list_lock, flags);
+ mutex_lock(&capi_drivers_lock);
+
switch (cdef.cardtype) {
case AVM_CARDTYPE_B1:
list_for_each(l, &capi_drivers) {
@@ -940,18 +1052,15 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
break;
}
if (!driver) {
- read_unlock_irqrestore(&capi_drivers_list_lock, flags);
printk(KERN_ERR "kcapi: driver not loaded.\n");
- return -EIO;
- }
- if (!driver->add_card) {
- read_unlock_irqrestore(&capi_drivers_list_lock, flags);
+ retval = -EIO;
+ } else if (!driver->add_card) {
printk(KERN_ERR "kcapi: driver has no add card function.\n");
- return -EIO;
- }
+ retval = -EIO;
+ } else
+ retval = driver->add_card(driver, &cparams);
- retval = driver->add_card(driver, &cparams);
- read_unlock_irqrestore(&capi_drivers_list_lock, flags);
+ mutex_unlock(&capi_drivers_lock);
return retval;
case AVMB1_LOAD:
@@ -968,27 +1077,30 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
sizeof(avmb1_loadandconfigdef)))
return -EFAULT;
}
- card = get_capi_ctr_by_nr(ldef.contr);
- if (!card)
- return -EINVAL;
- card = capi_ctr_get(card);
- if (!card)
- return -ESRCH;
- if (card->load_firmware == NULL) {
+
+ mutex_lock(&capi_controller_lock);
+
+ ctr = get_capi_ctr_by_nr(ldef.contr);
+ if (!ctr) {
+ retval = -EINVAL;
+ goto load_unlock_out;
+ }
+
+ if (ctr->load_firmware == NULL) {
printk(KERN_DEBUG "kcapi: load: no load function\n");
- capi_ctr_put(card);
- return -ESRCH;
+ retval = -ESRCH;
+ goto load_unlock_out;
}
if (ldef.t4file.len <= 0) {
printk(KERN_DEBUG "kcapi: load: invalid parameter: length of t4file is %d ?\n", ldef.t4file.len);
- capi_ctr_put(card);
- return -EINVAL;
+ retval = -EINVAL;
+ goto load_unlock_out;
}
if (ldef.t4file.data == NULL) {
printk(KERN_DEBUG "kcapi: load: invalid parameter: dataptr is 0\n");
- capi_ctr_put(card);
- return -EINVAL;
+ retval = -EINVAL;
+ goto load_unlock_out;
}
ldata.firmware.user = 1;
@@ -998,54 +1110,49 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
ldata.configuration.data = ldef.t4config.data;
ldata.configuration.len = ldef.t4config.len;
- if (card->cardstate != CARD_DETECTED) {
+ if (ctr->state != CAPI_CTR_DETECTED) {
printk(KERN_INFO "kcapi: load: contr=%d not in detect state\n", ldef.contr);
- capi_ctr_put(card);
- return -EBUSY;
+ retval = -EBUSY;
+ goto load_unlock_out;
}
- card->cardstate = CARD_LOADING;
-
- retval = card->load_firmware(card, &ldata);
+ ctr->state = CAPI_CTR_LOADING;
+ retval = ctr->load_firmware(ctr, &ldata);
if (retval) {
- card->cardstate = CARD_DETECTED;
- capi_ctr_put(card);
- return retval;
+ ctr->state = CAPI_CTR_DETECTED;
+ goto load_unlock_out;
}
- while (card->cardstate != CARD_RUNNING) {
-
- msleep_interruptible(100); /* 0.1 sec */
+ retval = wait_on_ctr_state(ctr, CAPI_CTR_RUNNING);
- if (signal_pending(current)) {
- capi_ctr_put(card);
- return -EINTR;
- }
- }
- capi_ctr_put(card);
- return 0;
+load_unlock_out:
+ mutex_unlock(&capi_controller_lock);
+ return retval;
case AVMB1_RESETCARD:
if (copy_from_user(&rdef, data, sizeof(avmb1_resetdef)))
return -EFAULT;
- card = get_capi_ctr_by_nr(rdef.contr);
- if (!card)
- return -ESRCH;
- if (card->cardstate == CARD_DETECTED)
- return 0;
+ retval = 0;
- card->reset_ctr(card);
+ mutex_lock(&capi_controller_lock);
- while (card->cardstate > CARD_DETECTED) {
+ ctr = get_capi_ctr_by_nr(rdef.contr);
+ if (!ctr) {
+ retval = -ESRCH;
+ goto reset_unlock_out;
+ }
- msleep_interruptible(100); /* 0.1 sec */
+ if (ctr->state == CAPI_CTR_DETECTED)
+ goto reset_unlock_out;
- if (signal_pending(current))
- return -EINTR;
- }
- return 0;
+ ctr->reset_ctr(ctr);
+
+ retval = wait_on_ctr_state(ctr, CAPI_CTR_DETECTED);
+reset_unlock_out:
+ mutex_unlock(&capi_controller_lock);
+ return retval;
}
return -EINVAL;
}
@@ -1062,7 +1169,8 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
int capi20_manufacturer(unsigned int cmd, void __user *data)
{
- struct capi_ctr *card;
+ struct capi_ctr *ctr;
+ int retval;
switch (cmd) {
#ifdef AVMB1_COMPAT
@@ -1080,14 +1188,20 @@ int capi20_manufacturer(unsigned int cmd, void __user *data)
if (copy_from_user(&fdef, data, sizeof(kcapi_flagdef)))
return -EFAULT;
- card = get_capi_ctr_by_nr(fdef.contr);
- if (!card)
- return -ESRCH;
+ mutex_lock(&capi_controller_lock);
+
+ ctr = get_capi_ctr_by_nr(fdef.contr);
+ if (ctr) {
+ ctr->traceflag = fdef.flag;
+ printk(KERN_INFO "kcapi: contr [%03d] set trace=%d\n",
+ ctr->cnr, ctr->traceflag);
+ retval = 0;
+ } else
+ retval = -ESRCH;
+
+ mutex_unlock(&capi_controller_lock);
- card->traceflag = fdef.flag;
- printk(KERN_INFO "kcapi: contr [%03d] set trace=%d\n",
- card->cnr, card->traceflag);
- return 0;
+ return retval;
}
case KCAPI_CMD_ADDCARD:
{
@@ -1095,7 +1209,6 @@ int capi20_manufacturer(unsigned int cmd, void __user *data)
struct capi_driver *driver = NULL;
capicardparams cparams;
kcapi_carddef cdef;
- int retval;
if ((retval = copy_from_user(&cdef, data, sizeof(cdef))))
return retval;
@@ -1107,6 +1220,8 @@ int capi20_manufacturer(unsigned int cmd, void __user *data)
cparams.cardtype = 0;
cdef.driver[sizeof(cdef.driver)-1] = 0;
+ mutex_lock(&capi_drivers_lock);
+
list_for_each(l, &capi_drivers) {
driver = list_entry(l, struct capi_driver, list);
if (strcmp(driver->name, cdef.driver) == 0)
@@ -1115,15 +1230,15 @@ int capi20_manufacturer(unsigned int cmd, void __user *data)
if (driver == NULL) {
printk(KERN_ERR "kcapi: driver \"%s\" not loaded.\n",
cdef.driver);
- return -ESRCH;
- }
-
- if (!driver->add_card) {
+ retval = -ESRCH;
+ } else if (!driver->add_card) {
printk(KERN_ERR "kcapi: driver \"%s\" has no add card function.\n", cdef.driver);
- return -EIO;
- }
+ retval = -EIO;
+ } else
+ retval = driver->add_card(driver, &cparams);
- return driver->add_card(driver, &cparams);
+ mutex_unlock(&capi_drivers_lock);
+ return retval;
}
default:
@@ -1137,30 +1252,6 @@ int capi20_manufacturer(unsigned int cmd, void __user *data)
EXPORT_SYMBOL(capi20_manufacturer);
-/* temporary hack */
-
-/**
- * capi20_set_callback() - set CAPI application notification callback function
- * @ap: CAPI application descriptor structure.
- * @callback: callback function (NULL to remove).
- *
- * If not NULL, the callback function will be called to notify the
- * application of the addition or removal of a controller.
- * The first argument (cmd) will tell whether the controller was added
- * (KCI_CONTRUP) or removed (KCI_CONTRDOWN).
- * The second argument (contr) will be the controller number.
- * For cmd==KCI_CONTRUP the third argument (data) will be a pointer to the
- * new controller's capability profile structure.
- */
-
-void capi20_set_callback(struct capi20_appl *ap,
- void (*callback) (unsigned int cmd, __u32 contr, void *data))
-{
- ap->callback = callback;
-}
-
-EXPORT_SYMBOL(capi20_set_callback);
-
/* ------------------------------------------------------------- */
/* -------- Init & Cleanup ------------------------------------- */
/* ------------------------------------------------------------- */
@@ -1169,27 +1260,21 @@ EXPORT_SYMBOL(capi20_set_callback);
* init / exit functions
*/
+static struct notifier_block capictr_nb = {
+ .notifier_call = notify_handler,
+ .priority = INT_MAX,
+};
+
static int __init kcapi_init(void)
{
- char *p;
- char rev[32];
- int ret;
-
- ret = cdebug_init();
- if (ret)
- return ret;
- kcapi_proc_init();
-
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
- strlcpy(rev, p + 2, sizeof(rev));
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p-1) = 0;
- } else
- strcpy(rev, "1.0");
+ int err;
- printk(KERN_NOTICE "CAPI Subsystem Rev %s\n", rev);
+ register_capictr_notifier(&capictr_nb);
- return 0;
+ err = cdebug_init();
+ if (!err)
+ kcapi_proc_init();
+ return err;
}
static void __exit kcapi_exit(void)
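
The new wait_on_ctr_state() helper above replaces the msleep_interruptible() polling loops that old_capi_manufacturer() used while waiting for a controller to finish loading or resetting: the caller holds capi_controller_lock, and the helper drops that mutex around schedule() so the controller state can actually change while the waiter sleeps. Below is a minimal userspace sketch of the same "release the lock while waiting, recheck the state after wakeup" idea, built on a pthread condition variable; the names and the firmware thread are illustrative only and are not part of the patch.

/*
 * Userspace sketch of the wait_on_ctr_state() idea: wait for a state
 * change while the lock protecting that state is released.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

enum { CTR_DETECTED, CTR_LOADING, CTR_RUNNING };

static pthread_mutex_t ctr_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ctr_wait = PTHREAD_COND_INITIALIZER;
static int ctr_state = CTR_DETECTED;

/* Must be called with ctr_lock held, like wait_on_ctr_state(). */
static void wait_on_state(int state)
{
	while (ctr_state != state)
		pthread_cond_wait(&ctr_wait, &ctr_lock); /* drops ctr_lock while asleep */
}

static void *firmware_thread(void *arg)
{
	(void)arg;
	sleep(1);				/* pretend to load firmware */
	pthread_mutex_lock(&ctr_lock);
	ctr_state = CTR_RUNNING;
	pthread_cond_broadcast(&ctr_wait);	/* like the wake_up() on state_wait_queue */
	pthread_mutex_unlock(&ctr_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, firmware_thread, NULL);

	pthread_mutex_lock(&ctr_lock);
	ctr_state = CTR_LOADING;
	wait_on_state(CTR_RUNNING);
	pthread_mutex_unlock(&ctr_lock);

	puts("controller running");
	pthread_join(t, NULL);
	return 0;
}

The kernel helper cannot simply use a condition variable: the wake-up side does not necessarily hold capi_controller_lock, and the wait must also bail out on signals and on controller detach, which is why it spells out the prepare_to_wait()/unlock/schedule()/lock/finish_wait() sequence by hand.
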
diff --git a/drivers/isdn/capi/kcapi.h b/drivers/isdn/capi/kcapi.h
index 244711f7f838..f4620b38ec51 100644
--- a/drivers/isdn/capi/kcapi.h
+++ b/drivers/isdn/capi/kcapi.h
@@ -24,16 +24,19 @@ printk(KERN_DEBUG "%s: " format "\n" , __func__ , ## arg); \
#endif
enum {
- CARD_DETECTED = 1,
- CARD_LOADING = 2,
- CARD_RUNNING = 3,
+ CAPI_CTR_DETACHED = 0,
+ CAPI_CTR_DETECTED = 1,
+ CAPI_CTR_LOADING = 2,
+ CAPI_CTR_RUNNING = 3,
};
extern struct list_head capi_drivers;
-extern rwlock_t capi_drivers_list_lock;
+extern struct mutex capi_drivers_lock;
+
+extern struct capi_ctr *capi_controller[CAPI_MAXCONTR];
+extern struct mutex capi_controller_lock;
extern struct capi20_appl *capi_applications[CAPI_MAXAPPL];
-extern struct capi_ctr *capi_cards[CAPI_MAXCONTR];
#ifdef CONFIG_PROC_FS
diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c
index 09d4db764d22..ea2dff602e49 100644
--- a/drivers/isdn/capi/kcapi_proc.c
+++ b/drivers/isdn/capi/kcapi_proc.c
@@ -15,13 +15,12 @@
#include <linux/seq_file.h>
#include <linux/init.h>
-static char *
-cardstate2str(unsigned short cardstate)
+static char *state2str(unsigned short state)
{
- switch (cardstate) {
- case CARD_DETECTED: return "detected";
- case CARD_LOADING: return "loading";
- case CARD_RUNNING: return "running";
+ switch (state) {
+ case CAPI_CTR_DETECTED: return "detected";
+ case CAPI_CTR_LOADING: return "loading";
+ case CAPI_CTR_RUNNING: return "running";
default: return "???";
}
}
@@ -36,9 +35,12 @@ cardstate2str(unsigned short cardstate)
// ---------------------------------------------------------------------------
static void *controller_start(struct seq_file *seq, loff_t *pos)
+ __acquires(capi_controller_lock)
{
+ mutex_lock(&capi_controller_lock);
+
if (*pos < CAPI_MAXCONTR)
- return &capi_cards[*pos];
+ return &capi_controller[*pos];
return NULL;
}
@@ -47,13 +49,15 @@ static void *controller_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
if (*pos < CAPI_MAXCONTR)
- return &capi_cards[*pos];
+ return &capi_controller[*pos];
return NULL;
}
static void controller_stop(struct seq_file *seq, void *v)
+ __releases(capi_controller_lock)
{
+ mutex_unlock(&capi_controller_lock);
}
static int controller_show(struct seq_file *seq, void *v)
@@ -65,7 +69,7 @@ static int controller_show(struct seq_file *seq, void *v)
seq_printf(seq, "%d %-10s %-8s %-16s %s\n",
ctr->cnr, ctr->driver_name,
- cardstate2str(ctr->cardstate),
+ state2str(ctr->state),
ctr->name,
ctr->procinfo ? ctr->procinfo(ctr) : "");
@@ -135,9 +139,11 @@ static const struct file_operations proc_contrstats_ops = {
// applid nrecvctlpkt nrecvdatapkt nsentctlpkt nsentdatapkt
// ---------------------------------------------------------------------------
-static void *
-applications_start(struct seq_file *seq, loff_t *pos)
+static void *applications_start(struct seq_file *seq, loff_t *pos)
+ __acquires(capi_controller_lock)
{
+ mutex_lock(&capi_controller_lock);
+
if (*pos < CAPI_MAXAPPL)
return &capi_applications[*pos];
@@ -154,9 +160,10 @@ applications_next(struct seq_file *seq, void *v, loff_t *pos)
return NULL;
}
-static void
-applications_stop(struct seq_file *seq, void *v)
+static void applications_stop(struct seq_file *seq, void *v)
+ __releases(capi_controller_lock)
{
+ mutex_unlock(&capi_controller_lock);
}
static int
@@ -239,9 +246,9 @@ static const struct file_operations proc_applstats_ops = {
// ---------------------------------------------------------------------------
static void *capi_driver_start(struct seq_file *seq, loff_t *pos)
- __acquires(&capi_drivers_list_lock)
+ __acquires(&capi_drivers_lock)
{
- read_lock(&capi_drivers_list_lock);
+ mutex_lock(&capi_drivers_lock);
return seq_list_start(&capi_drivers, *pos);
}
@@ -251,9 +258,9 @@ static void *capi_driver_next(struct seq_file *seq, void *v, loff_t *pos)
}
static void capi_driver_stop(struct seq_file *seq, void *v)
- __releases(&capi_drivers_list_lock)
+ __releases(&capi_drivers_lock)
{
- read_unlock(&capi_drivers_list_lock);
+ mutex_unlock(&capi_drivers_lock);
}
static int capi_driver_show(struct seq_file *seq, void *v)
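
In kcapi_proc.c the seq_file iterators now also own the locking: controller_start() and applications_start() take capi_controller_lock, the matching *_stop() releases it, and the *_show() callbacks only format one table slot. Below is a compact userspace analogue of that start/next/stop/show walk over a fixed-size, possibly sparse table; the table contents and names are made up for the sketch.

/*
 * Userspace shape of the controller_start/next/stop/show sequence:
 * start() takes the lock and returns the first slot, next() advances,
 * stop() drops the lock, show() prints one entry.
 */
#include <pthread.h>
#include <stdio.h>

#define MAXCONTR 4

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *table[MAXCONTR] = { "b1pci", NULL, "gigaset", NULL };

static const char **seq_start(long *pos)
{
	pthread_mutex_lock(&table_lock);	/* held for the whole walk */
	return *pos < MAXCONTR ? &table[*pos] : NULL;
}

static const char **seq_next(long *pos)
{
	++*pos;
	return *pos < MAXCONTR ? &table[*pos] : NULL;
}

static void seq_stop(void)
{
	pthread_mutex_unlock(&table_lock);
}

static void seq_show(const char **slot, long pos)
{
	if (*slot)	/* empty slots are skipped, like a NULL capi_controller[i] */
		printf("%ld %s running\n", pos, *slot);
}

int main(void)
{
	long pos = 0;
	const char **v;

	for (v = seq_start(&pos); v; v = seq_next(&pos))
		seq_show(v, pos);
	seq_stop();
	return 0;
}

Holding the mutex from start() to stop() is what lets show() dereference a slot without racing against a controller detach; the old controller walk returned &capi_cards[*pos] without taking any lock here.
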
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 3f5cd06af104..6f0ae32906bf 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -13,6 +13,8 @@
#include "gigaset.h"
#include <linux/ctype.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/isdn/capilli.h>
#include <linux/isdn/capicmd.h>
#include <linux/isdn/capiutil.h>
@@ -2106,35 +2108,22 @@ static char *gigaset_procinfo(struct capi_ctr *ctr)
return ctr->name; /* ToDo: more? */
}
-/**
- * gigaset_ctr_read_proc() - build controller proc file entry
- * @page: buffer of PAGE_SIZE bytes for receiving the entry.
- * @start: unused.
- * @off: unused.
- * @count: unused.
- * @eof: unused.
- * @ctr: controller descriptor structure.
- *
- * Return value: length of generated entry
- */
-static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
- int count, int *eof, struct capi_ctr *ctr)
+static int gigaset_proc_show(struct seq_file *m, void *v)
{
+ struct capi_ctr *ctr = m->private;
struct cardstate *cs = ctr->driverdata;
char *s;
int i;
- int len = 0;
- len += sprintf(page+len, "%-16s %s\n", "name", ctr->name);
- len += sprintf(page+len, "%-16s %s %s\n", "dev",
+
+ seq_printf(m, "%-16s %s\n", "name", ctr->name);
+ seq_printf(m, "%-16s %s %s\n", "dev",
dev_driver_string(cs->dev), dev_name(cs->dev));
- len += sprintf(page+len, "%-16s %d\n", "id", cs->myid);
+ seq_printf(m, "%-16s %d\n", "id", cs->myid);
if (cs->gotfwver)
- len += sprintf(page+len, "%-16s %d.%d.%d.%d\n", "firmware",
+ seq_printf(m, "%-16s %d.%d.%d.%d\n", "firmware",
cs->fwver[0], cs->fwver[1], cs->fwver[2], cs->fwver[3]);
- len += sprintf(page+len, "%-16s %d\n", "channels",
- cs->channels);
- len += sprintf(page+len, "%-16s %s\n", "onechannel",
- cs->onechannel ? "yes" : "no");
+ seq_printf(m, "%-16s %d\n", "channels", cs->channels);
+ seq_printf(m, "%-16s %s\n", "onechannel", cs->onechannel ? "yes" : "no");
switch (cs->mode) {
case M_UNKNOWN:
@@ -2152,7 +2141,7 @@ static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
default:
s = "??";
}
- len += sprintf(page+len, "%-16s %s\n", "mode", s);
+ seq_printf(m, "%-16s %s\n", "mode", s);
switch (cs->mstate) {
case MS_UNINITIALIZED:
@@ -2176,25 +2165,21 @@ static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
default:
s = "??";
}
- len += sprintf(page+len, "%-16s %s\n", "mstate", s);
+ seq_printf(m, "%-16s %s\n", "mstate", s);
- len += sprintf(page+len, "%-16s %s\n", "running",
- cs->running ? "yes" : "no");
- len += sprintf(page+len, "%-16s %s\n", "connected",
- cs->connected ? "yes" : "no");
- len += sprintf(page+len, "%-16s %s\n", "isdn_up",
- cs->isdn_up ? "yes" : "no");
- len += sprintf(page+len, "%-16s %s\n", "cidmode",
- cs->cidmode ? "yes" : "no");
+ seq_printf(m, "%-16s %s\n", "running", cs->running ? "yes" : "no");
+ seq_printf(m, "%-16s %s\n", "connected", cs->connected ? "yes" : "no");
+ seq_printf(m, "%-16s %s\n", "isdn_up", cs->isdn_up ? "yes" : "no");
+ seq_printf(m, "%-16s %s\n", "cidmode", cs->cidmode ? "yes" : "no");
for (i = 0; i < cs->channels; i++) {
- len += sprintf(page+len, "[%d]%-13s %d\n", i, "corrupted",
+ seq_printf(m, "[%d]%-13s %d\n", i, "corrupted",
cs->bcs[i].corrupted);
- len += sprintf(page+len, "[%d]%-13s %d\n", i, "trans_down",
+ seq_printf(m, "[%d]%-13s %d\n", i, "trans_down",
cs->bcs[i].trans_down);
- len += sprintf(page+len, "[%d]%-13s %d\n", i, "trans_up",
+ seq_printf(m, "[%d]%-13s %d\n", i, "trans_up",
cs->bcs[i].trans_up);
- len += sprintf(page+len, "[%d]%-13s %d\n", i, "chstate",
+ seq_printf(m, "[%d]%-13s %d\n", i, "chstate",
cs->bcs[i].chstate);
switch (cs->bcs[i].proto2) {
case L2_BITSYNC:
@@ -2209,11 +2194,23 @@ static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
default:
s = "??";
}
- len += sprintf(page+len, "[%d]%-13s %s\n", i, "proto2", s);
+ seq_printf(m, "[%d]%-13s %s\n", i, "proto2", s);
}
- return len;
+ return 0;
}
+static int gigaset_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, gigaset_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations gigaset_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = gigaset_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
static struct capi_driver capi_driver_gigaset = {
.name = "gigaset",
@@ -2256,7 +2253,7 @@ int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
iif->ctr.release_appl = gigaset_release_appl;
iif->ctr.send_message = gigaset_send_message;
iif->ctr.procinfo = gigaset_procinfo;
- iif->ctr.ctr_read_proc = gigaset_ctr_read_proc;
+ iif->ctr.proc_fops = &gigaset_proc_fops;
INIT_LIST_HEAD(&iif->appls);
skb_queue_head_init(&iif->sendqueue);
atomic_set(&iif->sendqlen, 0);
diff --git a/drivers/isdn/hardware/avm/avmcard.h b/drivers/isdn/hardware/avm/avmcard.h
index d964f07e4a56..a70e8854461d 100644
--- a/drivers/isdn/hardware/avm/avmcard.h
+++ b/drivers/isdn/hardware/avm/avmcard.h
@@ -556,8 +556,7 @@ u16 b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
void b1_parse_version(avmctrl_info *card);
irqreturn_t b1_interrupt(int interrupt, void *devptr);
-int b1ctl_read_proc(char *page, char **start, off_t off,
- int count, int *eof, struct capi_ctr *ctrl);
+extern const struct file_operations b1ctl_proc_fops;
avmcard_dmainfo *avmcard_dma_alloc(char *name, struct pci_dev *,
long rsize, long ssize);
@@ -577,7 +576,6 @@ void b1dma_register_appl(struct capi_ctr *ctrl,
capi_register_params *rp);
void b1dma_release_appl(struct capi_ctr *ctrl, u16 appl);
u16 b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
-int b1dmactl_read_proc(char *page, char **start, off_t off,
- int count, int *eof, struct capi_ctr *ctrl);
+extern const struct file_operations b1dmactl_proc_fops;
#endif /* _AVMCARD_H_ */
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
index a7c0083e78a7..c38fa0f4c729 100644
--- a/drivers/isdn/hardware/avm/b1.c
+++ b/drivers/isdn/hardware/avm/b1.c
@@ -12,6 +12,8 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
@@ -634,18 +636,17 @@ irqreturn_t b1_interrupt(int interrupt, void *devptr)
}
/* ------------------------------------------------------------- */
-int b1ctl_read_proc(char *page, char **start, off_t off,
- int count, int *eof, struct capi_ctr *ctrl)
+static int b1ctl_proc_show(struct seq_file *m, void *v)
{
+ struct capi_ctr *ctrl = m->private;
avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
avmcard *card = cinfo->card;
u8 flag;
- int len = 0;
char *s;
- len += sprintf(page+len, "%-16s %s\n", "name", card->name);
- len += sprintf(page+len, "%-16s 0x%x\n", "io", card->port);
- len += sprintf(page+len, "%-16s %d\n", "irq", card->irq);
+ seq_printf(m, "%-16s %s\n", "name", card->name);
+ seq_printf(m, "%-16s 0x%x\n", "io", card->port);
+ seq_printf(m, "%-16s %d\n", "irq", card->irq);
switch (card->cardtype) {
case avm_b1isa: s = "B1 ISA"; break;
case avm_b1pci: s = "B1 PCI"; break;
@@ -658,20 +659,20 @@ int b1ctl_read_proc(char *page, char **start, off_t off,
case avm_c2: s = "C2"; break;
default: s = "???"; break;
}
- len += sprintf(page+len, "%-16s %s\n", "type", s);
+ seq_printf(m, "%-16s %s\n", "type", s);
if (card->cardtype == avm_t1isa)
- len += sprintf(page+len, "%-16s %d\n", "cardnr", card->cardnr);
+ seq_printf(m, "%-16s %d\n", "cardnr", card->cardnr);
if ((s = cinfo->version[VER_DRIVER]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_driver", s);
+ seq_printf(m, "%-16s %s\n", "ver_driver", s);
if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s);
+ seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
if ((s = cinfo->version[VER_SERIAL]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_serial", s);
+ seq_printf(m, "%-16s %s\n", "ver_serial", s);
if (card->cardtype != avm_m1) {
flag = ((u8 *)(ctrl->profile.manu))[3];
if (flag)
- len += sprintf(page+len, "%-16s%s%s%s%s%s%s%s\n",
+ seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
"protocol",
(flag & 0x01) ? " DSS1" : "",
(flag & 0x02) ? " CT1" : "",
@@ -685,7 +686,7 @@ int b1ctl_read_proc(char *page, char **start, off_t off,
if (card->cardtype != avm_m1) {
flag = ((u8 *)(ctrl->profile.manu))[5];
if (flag)
- len += sprintf(page+len, "%-16s%s%s%s%s\n",
+ seq_printf(m, "%-16s%s%s%s%s\n",
"linetype",
(flag & 0x01) ? " point to point" : "",
(flag & 0x02) ? " point to multipoint" : "",
@@ -693,16 +694,25 @@ int b1ctl_read_proc(char *page, char **start, off_t off,
(flag & 0x04) ? " leased line with D-channel" : ""
);
}
- len += sprintf(page+len, "%-16s %s\n", "cardname", cinfo->cardname);
-
- if (off+count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len-off) ? count : len-off);
+ seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
+
+ return 0;
+}
+
+static int b1ctl_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, b1ctl_proc_show, PDE(inode)->data);
}
+const struct file_operations b1ctl_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = b1ctl_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+EXPORT_SYMBOL(b1ctl_proc_fops);
+
/* ------------------------------------------------------------- */
#ifdef CONFIG_PCI
@@ -781,8 +791,6 @@ EXPORT_SYMBOL(b1_send_message);
EXPORT_SYMBOL(b1_parse_version);
EXPORT_SYMBOL(b1_interrupt);
-EXPORT_SYMBOL(b1ctl_read_proc);
-
static int __init b1_init(void)
{
char *p;
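
The block deleted from the tail of b1ctl_read_proc() — and repeated in the b1dma, c4, hycapi and diva ->read_proc() implementations converted below — is the manual windowing every old-style proc read handler had to perform: set *eof once the caller has seen everything, and return at most count bytes starting at off. With seq_file, the show() callback just emits the whole text and the proc core takes care of partial reads, which is why each conversion simply deletes this arithmetic. The following standalone sketch shows what that removed code computed (purely illustrative, not kernel code; the *start bookkeeping is omitted).

/*
 * Sketch of the windowing the old ->read_proc() handlers implemented
 * by hand (the off/count/*eof dance that seq_file now hides).
 */
#include <stdio.h>

static size_t read_window(size_t len, size_t off, size_t count, int *eof)
{
	if (off + count >= len)
		*eof = 1;		/* caller has (or will have) seen it all */
	if (len < off)
		return 0;		/* reading past the generated text */
	return count < len - off ? count : len - off;
}

int main(void)
{
	char page[4096];
	size_t len = 0, off = 0, n;
	int eof = 0;

	len += sprintf(page + len, "%-16s %s\n", "name", "b1pci-demo");
	len += sprintf(page + len, "%-16s %d\n", "irq", 11);

	/* simulate a reader pulling 16 bytes at a time */
	while (!eof && (n = read_window(len, off, 16, &eof)) > 0) {
		fwrite(page + off, 1, n, stdout);
		off += n;
	}
	return 0;
}

Driving it with a small read size hands the text back in chunks until *eof trips, which is roughly the contract the proc core expected from these handlers.
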
diff --git a/drivers/isdn/hardware/avm/b1dma.c b/drivers/isdn/hardware/avm/b1dma.c
index 0e84aaae43fd..124550d0dbf3 100644
--- a/drivers/isdn/hardware/avm/b1dma.c
+++ b/drivers/isdn/hardware/avm/b1dma.c
@@ -11,6 +11,8 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
@@ -855,21 +857,20 @@ u16 b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
/* ------------------------------------------------------------- */
-int b1dmactl_read_proc(char *page, char **start, off_t off,
- int count, int *eof, struct capi_ctr *ctrl)
+static int b1dmactl_proc_show(struct seq_file *m, void *v)
{
+ struct capi_ctr *ctrl = m->private;
avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
avmcard *card = cinfo->card;
u8 flag;
- int len = 0;
char *s;
u32 txoff, txlen, rxoff, rxlen, csr;
unsigned long flags;
- len += sprintf(page+len, "%-16s %s\n", "name", card->name);
- len += sprintf(page+len, "%-16s 0x%x\n", "io", card->port);
- len += sprintf(page+len, "%-16s %d\n", "irq", card->irq);
- len += sprintf(page+len, "%-16s 0x%lx\n", "membase", card->membase);
+ seq_printf(m, "%-16s %s\n", "name", card->name);
+ seq_printf(m, "%-16s 0x%x\n", "io", card->port);
+ seq_printf(m, "%-16s %d\n", "irq", card->irq);
+ seq_printf(m, "%-16s 0x%lx\n", "membase", card->membase);
switch (card->cardtype) {
case avm_b1isa: s = "B1 ISA"; break;
case avm_b1pci: s = "B1 PCI"; break;
@@ -882,18 +883,18 @@ int b1dmactl_read_proc(char *page, char **start, off_t off,
case avm_c2: s = "C2"; break;
default: s = "???"; break;
}
- len += sprintf(page+len, "%-16s %s\n", "type", s);
+ seq_printf(m, "%-16s %s\n", "type", s);
if ((s = cinfo->version[VER_DRIVER]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_driver", s);
+ seq_printf(m, "%-16s %s\n", "ver_driver", s);
if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s);
+ seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
if ((s = cinfo->version[VER_SERIAL]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_serial", s);
+ seq_printf(m, "%-16s %s\n", "ver_serial", s);
if (card->cardtype != avm_m1) {
flag = ((u8 *)(ctrl->profile.manu))[3];
if (flag)
- len += sprintf(page+len, "%-16s%s%s%s%s%s%s%s\n",
+ seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
"protocol",
(flag & 0x01) ? " DSS1" : "",
(flag & 0x02) ? " CT1" : "",
@@ -907,7 +908,7 @@ int b1dmactl_read_proc(char *page, char **start, off_t off,
if (card->cardtype != avm_m1) {
flag = ((u8 *)(ctrl->profile.manu))[5];
if (flag)
- len += sprintf(page+len, "%-16s%s%s%s%s\n",
+ seq_printf(m, "%-16s%s%s%s%s\n",
"linetype",
(flag & 0x01) ? " point to point" : "",
(flag & 0x02) ? " point to multipoint" : "",
@@ -915,7 +916,7 @@ int b1dmactl_read_proc(char *page, char **start, off_t off,
(flag & 0x04) ? " leased line with D-channel" : ""
);
}
- len += sprintf(page+len, "%-16s %s\n", "cardname", cinfo->cardname);
+ seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
spin_lock_irqsave(&card->lock, flags);
@@ -930,27 +931,30 @@ int b1dmactl_read_proc(char *page, char **start, off_t off,
spin_unlock_irqrestore(&card->lock, flags);
- len += sprintf(page+len, "%-16s 0x%lx\n",
- "csr (cached)", (unsigned long)card->csr);
- len += sprintf(page+len, "%-16s 0x%lx\n",
- "csr", (unsigned long)csr);
- len += sprintf(page+len, "%-16s %lu\n",
- "txoff", (unsigned long)txoff);
- len += sprintf(page+len, "%-16s %lu\n",
- "txlen", (unsigned long)txlen);
- len += sprintf(page+len, "%-16s %lu\n",
- "rxoff", (unsigned long)rxoff);
- len += sprintf(page+len, "%-16s %lu\n",
- "rxlen", (unsigned long)rxlen);
-
- if (off+count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len-off) ? count : len-off);
+ seq_printf(m, "%-16s 0x%lx\n", "csr (cached)", (unsigned long)card->csr);
+ seq_printf(m, "%-16s 0x%lx\n", "csr", (unsigned long)csr);
+ seq_printf(m, "%-16s %lu\n", "txoff", (unsigned long)txoff);
+ seq_printf(m, "%-16s %lu\n", "txlen", (unsigned long)txlen);
+ seq_printf(m, "%-16s %lu\n", "rxoff", (unsigned long)rxoff);
+ seq_printf(m, "%-16s %lu\n", "rxlen", (unsigned long)rxlen);
+
+ return 0;
+}
+
+static int b1dmactl_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, b1dmactl_proc_show, PDE(inode)->data);
}
+const struct file_operations b1dmactl_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = b1dmactl_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+EXPORT_SYMBOL(b1dmactl_proc_fops);
+
/* ------------------------------------------------------------- */
EXPORT_SYMBOL(b1dma_reset);
@@ -963,7 +967,6 @@ EXPORT_SYMBOL(b1dma_reset_ctr);
EXPORT_SYMBOL(b1dma_register_appl);
EXPORT_SYMBOL(b1dma_release_appl);
EXPORT_SYMBOL(b1dma_send_message);
-EXPORT_SYMBOL(b1dmactl_read_proc);
static int __init b1dma_init(void)
{
diff --git a/drivers/isdn/hardware/avm/b1isa.c b/drivers/isdn/hardware/avm/b1isa.c
index 6461a32bc838..ff5390546f92 100644
--- a/drivers/isdn/hardware/avm/b1isa.c
+++ b/drivers/isdn/hardware/avm/b1isa.c
@@ -121,7 +121,7 @@ static int b1isa_probe(struct pci_dev *pdev)
cinfo->capi_ctrl.load_firmware = b1_load_firmware;
cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
cinfo->capi_ctrl.procinfo = b1isa_procinfo;
- cinfo->capi_ctrl.ctr_read_proc = b1ctl_read_proc;
+ cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops;
strcpy(cinfo->capi_ctrl.name, card->name);
retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/avm/b1pci.c b/drivers/isdn/hardware/avm/b1pci.c
index 5b314a2c4049..c97e4315079d 100644
--- a/drivers/isdn/hardware/avm/b1pci.c
+++ b/drivers/isdn/hardware/avm/b1pci.c
@@ -112,7 +112,7 @@ static int b1pci_probe(struct capicardparams *p, struct pci_dev *pdev)
cinfo->capi_ctrl.load_firmware = b1_load_firmware;
cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
cinfo->capi_ctrl.procinfo = b1pci_procinfo;
- cinfo->capi_ctrl.ctr_read_proc = b1ctl_read_proc;
+ cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops;
strcpy(cinfo->capi_ctrl.name, card->name);
cinfo->capi_ctrl.owner = THIS_MODULE;
@@ -251,7 +251,7 @@ static int b1pciv4_probe(struct capicardparams *p, struct pci_dev *pdev)
cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr;
cinfo->capi_ctrl.procinfo = b1pciv4_procinfo;
- cinfo->capi_ctrl.ctr_read_proc = b1dmactl_read_proc;
+ cinfo->capi_ctrl.proc_fops = &b1dmactl_proc_fops;
strcpy(cinfo->capi_ctrl.name, card->name);
retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/avm/b1pcmcia.c b/drivers/isdn/hardware/avm/b1pcmcia.c
index 7740403b40e1..d6391e0afeea 100644
--- a/drivers/isdn/hardware/avm/b1pcmcia.c
+++ b/drivers/isdn/hardware/avm/b1pcmcia.c
@@ -108,7 +108,7 @@ static int b1pcmcia_add_card(unsigned int port, unsigned irq,
cinfo->capi_ctrl.load_firmware = b1_load_firmware;
cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
cinfo->capi_ctrl.procinfo = b1pcmcia_procinfo;
- cinfo->capi_ctrl.ctr_read_proc = b1ctl_read_proc;
+ cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops;
strcpy(cinfo->capi_ctrl.name, card->name);
retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 6833301a45fc..de6e6b311819 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -11,6 +11,8 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
@@ -1062,19 +1064,18 @@ static char *c4_procinfo(struct capi_ctr *ctrl)
return cinfo->infobuf;
}
-static int c4_read_proc(char *page, char **start, off_t off,
- int count, int *eof, struct capi_ctr *ctrl)
+static int c4_proc_show(struct seq_file *m, void *v)
{
+ struct capi_ctr *ctrl = m->private;
avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
avmcard *card = cinfo->card;
u8 flag;
- int len = 0;
char *s;
- len += sprintf(page+len, "%-16s %s\n", "name", card->name);
- len += sprintf(page+len, "%-16s 0x%x\n", "io", card->port);
- len += sprintf(page+len, "%-16s %d\n", "irq", card->irq);
- len += sprintf(page+len, "%-16s 0x%lx\n", "membase", card->membase);
+ seq_printf(m, "%-16s %s\n", "name", card->name);
+ seq_printf(m, "%-16s 0x%x\n", "io", card->port);
+ seq_printf(m, "%-16s %d\n", "irq", card->irq);
+ seq_printf(m, "%-16s 0x%lx\n", "membase", card->membase);
switch (card->cardtype) {
case avm_b1isa: s = "B1 ISA"; break;
case avm_b1pci: s = "B1 PCI"; break;
@@ -1087,18 +1088,18 @@ static int c4_read_proc(char *page, char **start, off_t off,
case avm_c2: s = "C2"; break;
default: s = "???"; break;
}
- len += sprintf(page+len, "%-16s %s\n", "type", s);
+ seq_printf(m, "%-16s %s\n", "type", s);
if ((s = cinfo->version[VER_DRIVER]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_driver", s);
+ seq_printf(m, "%-16s %s\n", "ver_driver", s);
if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s);
+ seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
if ((s = cinfo->version[VER_SERIAL]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_serial", s);
+ seq_printf(m, "%-16s %s\n", "ver_serial", s);
if (card->cardtype != avm_m1) {
flag = ((u8 *)(ctrl->profile.manu))[3];
if (flag)
- len += sprintf(page+len, "%-16s%s%s%s%s%s%s%s\n",
+ seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
"protocol",
(flag & 0x01) ? " DSS1" : "",
(flag & 0x02) ? " CT1" : "",
@@ -1112,7 +1113,7 @@ static int c4_read_proc(char *page, char **start, off_t off,
if (card->cardtype != avm_m1) {
flag = ((u8 *)(ctrl->profile.manu))[5];
if (flag)
- len += sprintf(page+len, "%-16s%s%s%s%s\n",
+ seq_printf(m, "%-16s%s%s%s%s\n",
"linetype",
(flag & 0x01) ? " point to point" : "",
(flag & 0x02) ? " point to multipoint" : "",
@@ -1120,16 +1121,24 @@ static int c4_read_proc(char *page, char **start, off_t off,
(flag & 0x04) ? " leased line with D-channel" : ""
);
}
- len += sprintf(page+len, "%-16s %s\n", "cardname", cinfo->cardname);
-
- if (off+count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len-off) ? count : len-off);
+ seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
+
+ return 0;
}
+static int c4_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, c4_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations c4_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = c4_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/* ------------------------------------------------------------- */
static int c4_add_card(struct capicardparams *p, struct pci_dev *dev,
@@ -1201,7 +1210,7 @@ static int c4_add_card(struct capicardparams *p, struct pci_dev *dev,
cinfo->capi_ctrl.load_firmware = c4_load_firmware;
cinfo->capi_ctrl.reset_ctr = c4_reset_ctr;
cinfo->capi_ctrl.procinfo = c4_procinfo;
- cinfo->capi_ctrl.ctr_read_proc = c4_read_proc;
+ cinfo->capi_ctrl.proc_fops = &c4_proc_fops;
strcpy(cinfo->capi_ctrl.name, card->name);
retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/avm/t1isa.c b/drivers/isdn/hardware/avm/t1isa.c
index 1c53fd49adb6..baeeb3c2a3ee 100644
--- a/drivers/isdn/hardware/avm/t1isa.c
+++ b/drivers/isdn/hardware/avm/t1isa.c
@@ -429,7 +429,7 @@ static int t1isa_probe(struct pci_dev *pdev, int cardnr)
cinfo->capi_ctrl.load_firmware = t1isa_load_firmware;
cinfo->capi_ctrl.reset_ctr = t1isa_reset_ctr;
cinfo->capi_ctrl.procinfo = t1isa_procinfo;
- cinfo->capi_ctrl.ctr_read_proc = b1ctl_read_proc;
+ cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops;
strcpy(cinfo->capi_ctrl.name, card->name);
retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/avm/t1pci.c b/drivers/isdn/hardware/avm/t1pci.c
index e6d298d75146..5a3f83098018 100644
--- a/drivers/isdn/hardware/avm/t1pci.c
+++ b/drivers/isdn/hardware/avm/t1pci.c
@@ -119,7 +119,7 @@ static int t1pci_add_card(struct capicardparams *p, struct pci_dev *pdev)
cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr;
cinfo->capi_ctrl.procinfo = t1pci_procinfo;
- cinfo->capi_ctrl.ctr_read_proc = b1dmactl_read_proc;
+ cinfo->capi_ctrl.proc_fops = &b1dmactl_proc_fops;
strcpy(cinfo->capi_ctrl.name, card->name);
retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/eicon/capimain.c b/drivers/isdn/hardware/eicon/capimain.c
index 98fcdfc7ca55..0f073cd73763 100644
--- a/drivers/isdn/hardware/eicon/capimain.c
+++ b/drivers/isdn/hardware/eicon/capimain.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <asm/uaccess.h>
+#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include "os_capi.h"
@@ -75,25 +76,32 @@ void diva_os_free_message_buffer(diva_os_message_buffer_s * dmb)
/*
* proc function for controller info
*/
-static int diva_ctl_read_proc(char *page, char **start, off_t off,
- int count, int *eof, struct capi_ctr *ctrl)
+static int diva_ctl_proc_show(struct seq_file *m, void *v)
{
+ struct capi_ctr *ctrl = m->private;
diva_card *card = (diva_card *) ctrl->driverdata;
- int len = 0;
-
- len += sprintf(page + len, "%s\n", ctrl->name);
- len += sprintf(page + len, "Serial No. : %s\n", ctrl->serial);
- len += sprintf(page + len, "Id : %d\n", card->Id);
- len += sprintf(page + len, "Channels : %d\n", card->d.channels);
-
- if (off + count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len - off) ? count : len - off);
+
+ seq_printf(m, "%s\n", ctrl->name);
+ seq_printf(m, "Serial No. : %s\n", ctrl->serial);
+ seq_printf(m, "Id : %d\n", card->Id);
+ seq_printf(m, "Channels : %d\n", card->d.channels);
+
+ return 0;
+}
+
+static int diva_ctl_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, diva_ctl_proc_show, NULL);
}
+static const struct file_operations diva_ctl_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = diva_ctl_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/*
* set additional os settings in capi_ctr struct
*/
@@ -102,7 +110,7 @@ void diva_os_set_controller_struct(struct capi_ctr *ctrl)
ctrl->driver_name = DRIVERLNAME;
ctrl->load_firmware = NULL;
ctrl->reset_ctr = NULL;
- ctrl->ctr_read_proc = diva_ctl_read_proc;
+ ctrl->proc_fops = &diva_ctl_proc_fops;
ctrl->owner = THIS_MODULE;
}
diff --git a/drivers/isdn/hardware/eicon/diva_didd.c b/drivers/isdn/hardware/eicon/diva_didd.c
index 993b14cf1778..5d06a7437824 100644
--- a/drivers/isdn/hardware/eicon/diva_didd.c
+++ b/drivers/isdn/hardware/eicon/diva_didd.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include "platform.h"
@@ -62,39 +63,41 @@ static char *getrev(const char *revision)
return rev;
}
-static int
-proc_read(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int divadidd_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
char tmprev[32];
strcpy(tmprev, main_revision);
- len += sprintf(page + len, "%s\n", DRIVERNAME);
- len += sprintf(page + len, "name : %s\n", DRIVERLNAME);
- len += sprintf(page + len, "release : %s\n", DRIVERRELEASE_DIDD);
- len += sprintf(page + len, "build : %s(%s)\n",
+ seq_printf(m, "%s\n", DRIVERNAME);
+ seq_printf(m, "name : %s\n", DRIVERLNAME);
+ seq_printf(m, "release : %s\n", DRIVERRELEASE_DIDD);
+ seq_printf(m, "build : %s(%s)\n",
diva_didd_common_code_build, DIVA_BUILD);
- len += sprintf(page + len, "revision : %s\n", getrev(tmprev));
-
- if (off + count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len - off) ? count : len - off);
+ seq_printf(m, "revision : %s\n", getrev(tmprev));
+
+ return 0;
}
+static int divadidd_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, divadidd_proc_show, NULL);
+}
+
+static const struct file_operations divadidd_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = divadidd_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int DIVA_INIT_FUNCTION create_proc(void)
{
proc_net_eicon = proc_mkdir("eicon", init_net.proc_net);
if (proc_net_eicon) {
- if ((proc_didd =
- create_proc_entry(DRIVERLNAME, S_IFREG | S_IRUGO,
- proc_net_eicon))) {
- proc_didd->read_proc = proc_read;
- }
+ proc_didd = proc_create(DRIVERLNAME, S_IRUGO, proc_net_eicon,
+ &divadidd_proc_fops);
return (1);
}
return (0);
diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c
index 69e71ebe7841..f577719ab3fa 100644
--- a/drivers/isdn/hardware/eicon/divasi.c
+++ b/drivers/isdn/hardware/eicon/divasi.c
@@ -17,6 +17,7 @@
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
+#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
@@ -86,39 +87,40 @@ static void diva_um_timer_function(unsigned long data);
extern struct proc_dir_entry *proc_net_eicon;
static struct proc_dir_entry *um_idi_proc_entry = NULL;
-static int
-um_idi_proc_read(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int um_idi_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
char tmprev[32];
- len += sprintf(page + len, "%s\n", DRIVERNAME);
- len += sprintf(page + len, "name : %s\n", DRIVERLNAME);
- len += sprintf(page + len, "release : %s\n", DRIVERRELEASE_IDI);
+ seq_printf(m, "%s\n", DRIVERNAME);
+ seq_printf(m, "name : %s\n", DRIVERLNAME);
+ seq_printf(m, "release : %s\n", DRIVERRELEASE_IDI);
strcpy(tmprev, main_revision);
- len += sprintf(page + len, "revision : %s\n", getrev(tmprev));
- len += sprintf(page + len, "build : %s\n", DIVA_BUILD);
- len += sprintf(page + len, "major : %d\n", major);
-
- if (off + count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len - off) ? count : len - off);
+ seq_printf(m, "revision : %s\n", getrev(tmprev));
+ seq_printf(m, "build : %s\n", DIVA_BUILD);
+ seq_printf(m, "major : %d\n", major);
+
+ return 0;
+}
+
+static int um_idi_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, um_idi_proc_show, NULL);
}
+static const struct file_operations um_idi_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = um_idi_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int DIVA_INIT_FUNCTION create_um_idi_proc(void)
{
- um_idi_proc_entry = create_proc_entry(DRIVERLNAME,
- S_IFREG | S_IRUGO | S_IWUSR,
- proc_net_eicon);
+ um_idi_proc_entry = proc_create(DRIVERLNAME, S_IRUGO, proc_net_eicon,
+ &um_idi_proc_fops);
if (!um_idi_proc_entry)
return (0);
-
- um_idi_proc_entry->read_proc = um_idi_proc_read;
-
return (1);
}
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c
index 040827288ec9..46d44a942624 100644
--- a/drivers/isdn/hardware/eicon/divasproc.c
+++ b/drivers/isdn/hardware/eicon/divasproc.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/list.h>
#include <asm/uaccess.h>
@@ -141,14 +142,10 @@ void remove_divas_proc(void)
}
}
-/*
-** write group_optimization
-*/
-static int
-write_grp_opt(struct file *file, const char __user *buffer, unsigned long count,
- void *data)
+static ssize_t grp_opt_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
- diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data;
+ diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
if ((count == 1) || (count == 2)) {
@@ -172,14 +169,10 @@ write_grp_opt(struct file *file, const char __user *buffer, unsigned long count,
return (-EINVAL);
}
-/*
-** write dynamic_l1_down
-*/
-static int
-write_d_l1_down(struct file *file, const char __user *buffer, unsigned long count,
- void *data)
+static ssize_t d_l1_down_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
- diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data;
+ diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
if ((count == 1) || (count == 2)) {
@@ -203,63 +196,62 @@ write_d_l1_down(struct file *file, const char __user *buffer, unsigned long coun
return (-EINVAL);
}
-
-/*
-** read dynamic_l1_down
-*/
-static int
-read_d_l1_down(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int d_l1_down_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
- diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data;
+ diva_os_xdi_adapter_t *a = m->private;
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
- len += sprintf(page + len, "%s\n",
+ seq_printf(m, "%s\n",
(IoAdapter->capi_cfg.
cfg_1 & DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON) ? "1" :
"0");
+ return 0;
+}
- if (off + count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len - off) ? count : len - off);
+static int d_l1_down_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, d_l1_down_proc_show, PDE(inode)->data);
}
-/*
-** read group_optimization
-*/
-static int
-read_grp_opt(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static const struct file_operations d_l1_down_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = d_l1_down_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = d_l1_down_proc_write,
+};
+
+static int grp_opt_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
- diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data;
+ diva_os_xdi_adapter_t *a = m->private;
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
- len += sprintf(page + len, "%s\n",
+ seq_printf(m, "%s\n",
(IoAdapter->capi_cfg.
cfg_1 & DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON)
? "1" : "0");
+ return 0;
+}
- if (off + count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len - off) ? count : len - off);
+static int grp_opt_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, grp_opt_proc_show, PDE(inode)->data);
}
-/*
-** info write
-*/
-static int
-info_write(struct file *file, const char __user *buffer, unsigned long count,
- void *data)
+static const struct file_operations grp_opt_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = grp_opt_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = grp_opt_proc_write,
+};
+
+static ssize_t info_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
- diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data;
+ diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
char c[4];
@@ -277,63 +269,46 @@ info_write(struct file *file, const char __user *buffer, unsigned long count,
return (-EINVAL);
}
-/*
-** info read
-*/
-static int
-info_read(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int info_proc_show(struct seq_file *m, void *v)
{
int i = 0;
- int len = 0;
char *p;
char tmpser[16];
- diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data;
+ diva_os_xdi_adapter_t *a = m->private;
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
- len +=
- sprintf(page + len, "Name : %s\n",
- IoAdapter->Properties.Name);
- len += sprintf(page + len, "DSP state : %08x\n", a->dsp_mask);
- len += sprintf(page + len, "Channels : %02d\n",
- IoAdapter->Properties.Channels);
- len += sprintf(page + len, "E. max/used : %03d/%03d\n",
+ seq_printf(m, "Name : %s\n", IoAdapter->Properties.Name);
+ seq_printf(m, "DSP state : %08x\n", a->dsp_mask);
+ seq_printf(m, "Channels : %02d\n", IoAdapter->Properties.Channels);
+ seq_printf(m, "E. max/used : %03d/%03d\n",
IoAdapter->e_max, IoAdapter->e_count);
diva_get_vserial_number(IoAdapter, tmpser);
- len += sprintf(page + len, "Serial : %s\n", tmpser);
- len +=
- sprintf(page + len, "IRQ : %d\n",
- IoAdapter->irq_info.irq_nr);
- len += sprintf(page + len, "CardIndex : %d\n", a->CardIndex);
- len += sprintf(page + len, "CardOrdinal : %d\n", a->CardOrdinal);
- len += sprintf(page + len, "Controller : %d\n", a->controller);
- len += sprintf(page + len, "Bus-Type : %s\n",
+ seq_printf(m, "Serial : %s\n", tmpser);
+ seq_printf(m, "IRQ : %d\n", IoAdapter->irq_info.irq_nr);
+ seq_printf(m, "CardIndex : %d\n", a->CardIndex);
+ seq_printf(m, "CardOrdinal : %d\n", a->CardOrdinal);
+ seq_printf(m, "Controller : %d\n", a->controller);
+ seq_printf(m, "Bus-Type : %s\n",
(a->Bus ==
DIVAS_XDI_ADAPTER_BUS_ISA) ? "ISA" : "PCI");
- len += sprintf(page + len, "Port-Name : %s\n", a->port_name);
+ seq_printf(m, "Port-Name : %s\n", a->port_name);
if (a->Bus == DIVAS_XDI_ADAPTER_BUS_PCI) {
- len +=
- sprintf(page + len, "PCI-bus : %d\n",
- a->resources.pci.bus);
- len +=
- sprintf(page + len, "PCI-func : %d\n",
- a->resources.pci.func);
+ seq_printf(m, "PCI-bus : %d\n", a->resources.pci.bus);
+ seq_printf(m, "PCI-func : %d\n", a->resources.pci.func);
for (i = 0; i < 8; i++) {
if (a->resources.pci.bar[i]) {
- len +=
- sprintf(page + len,
+ seq_printf(m,
"Mem / I/O %d : 0x%x / mapped : 0x%lx",
i, a->resources.pci.bar[i],
(unsigned long) a->resources.
pci.addr[i]);
if (a->resources.pci.length[i]) {
- len +=
- sprintf(page + len,
+ seq_printf(m,
" / length : %d",
a->resources.pci.
length[i]);
}
- len += sprintf(page + len, "\n");
+ seq_putc(m, '\n');
}
}
}
@@ -353,16 +328,25 @@ info_read(char *page, char **start, off_t off, int count, int *eof,
} else {
p = "ready";
}
- len += sprintf(page + len, "State : %s\n", p);
+ seq_printf(m, "State : %s\n", p);
- if (off + count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len - off) ? count : len - off);
+ return 0;
+}
+
+static int info_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, info_proc_show, PDE(inode)->data);
}
+static const struct file_operations info_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = info_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = info_proc_write,
+};
+
/*
** adapter proc init/de-init
*/
@@ -380,28 +364,20 @@ int create_adapter_proc(diva_os_xdi_adapter_t * a)
return (0);
a->proc_adapter_dir = (void *) de;
- if (!(pe =
- create_proc_entry(info_proc_name, S_IFREG | S_IRUGO | S_IWUSR, de)))
+ pe = proc_create_data(info_proc_name, S_IRUGO | S_IWUSR, de,
+ &info_proc_fops, a);
+ if (!pe)
return (0);
a->proc_info = (void *) pe;
- pe->write_proc = info_write;
- pe->read_proc = info_read;
- pe->data = a;
- if ((pe = create_proc_entry(grp_opt_proc_name,
- S_IFREG | S_IRUGO | S_IWUSR, de))) {
+ pe = proc_create_data(grp_opt_proc_name, S_IRUGO | S_IWUSR, de,
+ &grp_opt_proc_fops, a);
+ if (pe)
a->proc_grp_opt = (void *) pe;
- pe->write_proc = write_grp_opt;
- pe->read_proc = read_grp_opt;
- pe->data = a;
- }
- if ((pe = create_proc_entry(d_l1_down_proc_name,
- S_IFREG | S_IRUGO | S_IWUSR, de))) {
+ pe = proc_create_data(d_l1_down_proc_name, S_IRUGO | S_IWUSR, de,
+ &d_l1_down_proc_fops, a);
+ if (pe)
a->proc_d_l1_down = (void *) pe;
- pe->write_proc = write_d_l1_down;
- pe->read_proc = read_d_l1_down;
- pe->data = a;
- }
DBG_TRC(("proc entry %s created", tmp));
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 1a1420d7a828..ad36df9b759c 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -2846,7 +2846,7 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
int conf;
if (ch < 0 || ch > 31)
- return EINVAL;
+ return -EINVAL;
oslot_tx = hc->chan[ch].slot_tx;
oslot_rx = hc->chan[ch].slot_rx;
conf = hc->chan[ch].conf;
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index 62441ba53b95..36c6c616a655 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -1133,6 +1133,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
kfree(sc);
release_card(card);
+ break;
} else
card->sc[i - 1] = sc;
}
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index d3f1077b709b..2952a58c7a61 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -529,6 +529,7 @@ W6692_fill_Bfifo(struct w6692_ch *wch)
}
}
+#if 0
static int
setvolume(struct w6692_ch *wch, int mic, struct sk_buff *skb)
{
@@ -571,6 +572,7 @@ enable_pots(struct w6692_ch *wch)
WriteW6692(card, W_PCTL, card->pctl);
return 0;
}
+#endif
static int
disable_pots(struct w6692_ch *wch)
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index bfeb9b6aa043..6bde16c00fb5 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -138,7 +138,7 @@ waitrecmsg(struct IsdnCardState *cs, u_char *len,
while((!(cs->BC_Read_Reg(cs, 0, ISAR_IRQBIT) & ISAR_IRQSTA)) &&
(timeout++ < maxdelay))
udelay(1);
- if (timeout >= maxdelay) {
+ if (timeout > maxdelay) {
printk(KERN_WARNING"isar recmsg IRQSTA timeout\n");
return(0);
}
diff --git a/drivers/isdn/hysdn/hycapi.c b/drivers/isdn/hysdn/hycapi.c
index 4ffaa14b9fc4..fe874afa4f81 100644
--- a/drivers/isdn/hysdn/hycapi.c
+++ b/drivers/isdn/hysdn/hycapi.c
@@ -11,6 +11,8 @@
*/
#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
@@ -432,26 +434,16 @@ static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
return retval;
}
-/*********************************************************************
-hycapi_read_proc
-
-Informations provided in the /proc/capi-entries.
-
-*********************************************************************/
-
-static int hycapi_read_proc(char *page, char **start, off_t off,
- int count, int *eof, struct capi_ctr *ctrl)
+static int hycapi_proc_show(struct seq_file *m, void *v)
{
+ struct capi_ctr *ctrl = m->private;
hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
hysdn_card *card = cinfo->card;
- int len = 0;
char *s;
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_read_proc\n");
-#endif
- len += sprintf(page+len, "%-16s %s\n", "name", cinfo->cardname);
- len += sprintf(page+len, "%-16s 0x%x\n", "io", card->iobase);
- len += sprintf(page+len, "%-16s %d\n", "irq", card->irq);
+
+ seq_printf(m, "%-16s %s\n", "name", cinfo->cardname);
+ seq_printf(m, "%-16s 0x%x\n", "io", card->iobase);
+ seq_printf(m, "%-16s %d\n", "irq", card->irq);
switch (card->brdtype) {
case BD_PCCARD: s = "HYSDN Hycard"; break;
@@ -461,24 +453,32 @@ static int hycapi_read_proc(char *page, char **start, off_t off,
case BD_PLEXUS: s = "HYSDN Plexus30"; break;
default: s = "???"; break;
}
- len += sprintf(page+len, "%-16s %s\n", "type", s);
+ seq_printf(m, "%-16s %s\n", "type", s);
if ((s = cinfo->version[VER_DRIVER]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_driver", s);
+ seq_printf(m, "%-16s %s\n", "ver_driver", s);
if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s);
+ seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
if ((s = cinfo->version[VER_SERIAL]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_serial", s);
+ seq_printf(m, "%-16s %s\n", "ver_serial", s);
- len += sprintf(page+len, "%-16s %s\n", "cardname", cinfo->cardname);
+ seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
- if (off+count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len-off) ? count : len-off);
+ return 0;
+}
+
+static int hycapi_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hycapi_proc_show, PDE(inode)->data);
+}
+static const struct file_operations hycapi_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = hycapi_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/**************************************************************
hycapi_load_firmware
@@ -774,7 +774,7 @@ hycapi_capi_create(hysdn_card *card)
ctrl->load_firmware = hycapi_load_firmware;
ctrl->reset_ctr = hycapi_reset_ctr;
ctrl->procinfo = hycapi_procinfo;
- ctrl->ctr_read_proc = hycapi_read_proc;
+ ctrl->proc_fops = &hycapi_proc_fops;
strcpy(ctrl->name, cinfo->cardname);
ctrl->owner = THIS_MODULE;
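The hycapi change is the same seq_file conversion used throughout this series for single-shot /proc files: a show() callback that prints into the seq_file, single_open() to bind it to the entry's private data, and the stock seq_read/seq_lseek/single_release handlers. A minimal self-contained sketch for this kernel generation (the foo_* names and struct foo are illustrative):

    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    struct foo { int state; };          /* stand-in for the driver's private data */

    static int foo_proc_show(struct seq_file *m, void *v)
    {
        struct foo *foo = m->private;   /* set by single_open() below */

        seq_printf(m, "%-16s %d\n", "state", foo->state);
        return 0;
    }

    static int foo_proc_open(struct inode *inode, struct file *file)
    {
        /* PDE(inode)->data is whatever was passed to proc_create_data() */
        return single_open(file, foo_proc_show, PDE(inode)->data);
    }

    static const struct file_operations foo_proc_fops = {
        .owner   = THIS_MODULE,
        .open    = foo_proc_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };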
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 8b8558fcb042..37d8579fc7a9 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -949,11 +949,8 @@ static int dvb_net_filter_sec_set(struct net_device *dev,
(*secfilter)->filter_mask[10] = mac_mask[1];
(*secfilter)->filter_mask[11]=mac_mask[0];
- dprintk("%s: filter mac=%02x %02x %02x %02x %02x %02x\n",
- dev->name, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
- dprintk("%s: filter mask=%02x %02x %02x %02x %02x %02x\n",
- dev->name, mac_mask[0], mac_mask[1], mac_mask[2],
- mac_mask[3], mac_mask[4], mac_mask[5]);
+ dprintk("%s: filter mac=%pM\n", dev->name, mac);
+ dprintk("%s: filter mask=%pM\n", dev->name, mac_mask);
return 0;
}
@@ -1141,18 +1138,18 @@ static void wq_set_multicast_list (struct work_struct *work)
} else if ((dev->flags & IFF_ALLMULTI)) {
dprintk("%s: allmulti mode\n", dev->name);
priv->rx_mode = RX_MODE_ALL_MULTI;
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
int mci;
struct dev_mc_list *mc;
dprintk("%s: set_mc_list, %d entries\n",
- dev->name, dev->mc_count);
+ dev->name, netdev_mc_count(dev));
priv->rx_mode = RX_MODE_MULTI;
priv->multi_num = 0;
for (mci = 0, mc=dev->mc_list;
- mci < dev->mc_count;
+ mci < netdev_mc_count(dev);
mc = mc->next, mci++) {
dvb_set_mc_filter(dev, mc);
}
@@ -1239,7 +1236,6 @@ static void dvb_net_setup(struct net_device *dev)
dev->header_ops = &dvb_header_ops;
dev->netdev_ops = &dvb_netdev_ops;
dev->mtu = 4096;
- dev->mc_count = 0;
dev->flags |= IFF_NOARP;
}
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index 7045c45da9b1..949a648f8e2e 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -111,10 +111,7 @@ static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len)
break;
case I2O_SNFORMAT_LAN48_MAC: /* LAN-48 MAC Address */
- seq_printf(seq,
- "LAN-48 MAC address @ %02X:%02X:%02X:%02X:%02X:%02X",
- serialno[2], serialno[3],
- serialno[4], serialno[5], serialno[6], serialno[7]);
+ seq_printf(seq, "LAN-48 MAC address @ %pM", &serialno[2]);
break;
case I2O_SNFORMAT_WAN: /* WAN MAC Address */
@@ -126,10 +123,8 @@ static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len)
case I2O_SNFORMAT_LAN64_MAC: /* LAN-64 MAC Address */
/* FIXME: Figure out what a LAN-64 address really looks like?? */
seq_printf(seq,
- "LAN-64 MAC address @ [?:%02X:%02X:?] %02X:%02X:%02X:%02X:%02X:%02X",
- serialno[8], serialno[9],
- serialno[2], serialno[3],
- serialno[4], serialno[5], serialno[6], serialno[7]);
+ "LAN-64 MAC address @ [?:%02X:%02X:?] %pM",
+ serialno[8], serialno[9], &serialno[2]);
break;
case I2O_SNFORMAT_DDM: /* I2O DDM */
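Both MAC-address cleanups above (here and in dvb_net.c) rely on the kernel's %pM printf extension: given a pointer to a six-byte buffer, it prints the address as colon-separated hex, replacing the old six-argument %02X lists. Roughly (the buffer contents are only an example):

    u8 mac[6] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };

    /* prints: mac 00:1a:2b:3c:4d:5e */
    printk(KERN_INFO "mac %pM\n", mac);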
diff --git a/drivers/misc/iwmc3200top/fw-download.c b/drivers/misc/iwmc3200top/fw-download.c
index 50d431e469f5..9dbaeb574e63 100644
--- a/drivers/misc/iwmc3200top/fw-download.c
+++ b/drivers/misc/iwmc3200top/fw-download.c
@@ -43,15 +43,14 @@ static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
struct iwmct_parser *parser = &priv->parser;
struct iwmct_fw_hdr *fw_hdr = &parser->versions;
- LOG_INFOEX(priv, INIT, "-->\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
LOG_INFO(priv, FW_DOWNLOAD, "file_size=%zd\n", file_size);
parser->file = file;
parser->file_size = file_size;
parser->cur_pos = 0;
- parser->buf = NULL;
-
+ parser->entry_point = 0;
parser->buf = kzalloc(block_size, GFP_KERNEL);
if (!parser->buf) {
LOG_ERROR(priv, FW_DOWNLOAD, "kzalloc error\n");
@@ -70,7 +69,7 @@ static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
parser->cur_pos += sizeof(struct iwmct_fw_hdr);
- LOG_INFOEX(priv, INIT, "<--\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
return 0;
}
@@ -113,7 +112,7 @@ static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
struct iwmct_dbg *dbg = &priv->dbg;
struct iwmct_fw_sec_hdr *sec_hdr;
- LOG_INFOEX(priv, INIT, "-->\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
while (parser->cur_pos + sizeof(struct iwmct_fw_sec_hdr)
<= parser->file_size) {
@@ -152,7 +151,7 @@ static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
"finished with section cur_pos=%zd\n", parser->cur_pos);
}
- LOG_INFOEX(priv, INIT, "<--\n");
+ LOG_TRACE(priv, INIT, "<--\n");
return 0;
}
@@ -167,7 +166,7 @@ static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
int ret = 0;
u32 cmd = 0;
- LOG_INFOEX(priv, INIT, "-->\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
LOG_INFO(priv, FW_DOWNLOAD, "Download address 0x%x size 0x%zx\n",
addr, sec_size);
@@ -229,7 +228,7 @@ static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
hdr->cmd = cpu_to_le32(cmd);
/* send it down */
/* TODO: add more proper sending and error checking */
- ret = iwmct_tx(priv, 0, parser->buf, trans_size);
+ ret = iwmct_tx(priv, parser->buf, trans_size);
if (ret != 0) {
LOG_INFO(priv, FW_DOWNLOAD,
"iwmct_tx returned %d\n", ret);
@@ -251,7 +250,7 @@ static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
if (sent < sec_size)
ret = -EINVAL;
exit:
- LOG_INFOEX(priv, INIT, "<--\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
return ret;
}
@@ -262,7 +261,7 @@ static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
int ret;
u32 cmd;
- LOG_INFOEX(priv, INIT, "-->\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
memset(parser->buf, 0, parser->buf_size);
cmd = IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
@@ -281,11 +280,11 @@ static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, sizeof(*hdr));
/* send it down */
/* TODO: add more proper sending and error checking */
- ret = iwmct_tx(priv, 0, parser->buf, IWMC_SDIO_BLK_SIZE);
+ ret = iwmct_tx(priv, parser->buf, IWMC_SDIO_BLK_SIZE);
if (ret)
LOG_INFO(priv, FW_DOWNLOAD, "iwmct_tx returned %d", ret);
- LOG_INFOEX(priv, INIT, "<--\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
return 0;
}
@@ -298,8 +297,16 @@ int iwmct_fw_load(struct iwmct_priv *priv)
__le32 addr;
int ret;
- /* clear parser struct */
- memset(&priv->parser, 0, sizeof(struct iwmct_parser));
+
+ LOG_INFO(priv, FW_DOWNLOAD, "barker download request 0x%x is:\n",
+ priv->barker);
+ LOG_INFO(priv, FW_DOWNLOAD, "******* Top FW %s requested ********\n",
+ (priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
+ LOG_INFO(priv, FW_DOWNLOAD, "******* GPS FW %s requested ********\n",
+ (priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
+ LOG_INFO(priv, FW_DOWNLOAD, "******* BT FW %s requested ********\n",
+ (priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");
+
/* get the firmware */
ret = request_firmware(&raw, fw_name, &priv->func->dev);
@@ -317,6 +324,7 @@ int iwmct_fw_load(struct iwmct_priv *priv)
LOG_INFO(priv, FW_DOWNLOAD, "Read firmware '%s'\n", fw_name);
+ /* clear parser struct */
ret = iwmct_fw_parser_init(priv, raw->data, raw->size, priv->trans_len);
if (ret < 0) {
LOG_ERROR(priv, FW_DOWNLOAD,
@@ -324,7 +332,6 @@ int iwmct_fw_load(struct iwmct_priv *priv)
goto exit;
}
- /* checksum */
if (!iwmct_checksum(priv)) {
LOG_ERROR(priv, FW_DOWNLOAD, "checksum error\n");
ret = -EINVAL;
@@ -333,23 +340,18 @@ int iwmct_fw_load(struct iwmct_priv *priv)
/* download firmware to device */
while (iwmct_parse_next_section(priv, &pdata, &len, &addr)) {
- if (iwmct_download_section(priv, pdata, len, addr)) {
+ ret = iwmct_download_section(priv, pdata, len, addr);
+ if (ret) {
LOG_ERROR(priv, FW_DOWNLOAD,
"%s download section failed\n", fw_name);
- ret = -EIO;
goto exit;
}
}
- iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK));
+ ret = iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK));
exit:
kfree(priv->parser.buf);
-
- if (raw)
- release_firmware(raw);
-
- raw = NULL;
-
+ release_firmware(raw);
return ret;
}
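The trimmed exit path works because release_firmware() already tolerates a NULL argument, so the explicit if (raw) guard and the re-NULLing were redundant. A condensed sketch of the resulting request/parse/release flow, with parse_and_download() as a hypothetical stand-in for the parser loop above:

    const struct firmware *raw = NULL;
    int ret;

    ret = request_firmware(&raw, fw_name, &priv->func->dev);
    if (ret < 0)
        return ret;                     /* nothing acquired yet */

    ret = parse_and_download(priv, raw->data, raw->size);

    release_firmware(raw);              /* safe even for NULL */
    return ret;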
diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h
index 43bd510e1872..740ff0738ea8 100644
--- a/drivers/misc/iwmc3200top/iwmc3200top.h
+++ b/drivers/misc/iwmc3200top/iwmc3200top.h
@@ -196,9 +196,7 @@ struct iwmct_priv {
struct list_head read_req_list;
};
-extern int iwmct_tx(struct iwmct_priv *priv, unsigned int addr,
- void *src, int count);
-
+extern int iwmct_tx(struct iwmct_priv *priv, void *src, int count);
extern int iwmct_fw_load(struct iwmct_priv *priv);
extern void iwmct_dbg_init_params(struct iwmct_priv *drv);
diff --git a/drivers/misc/iwmc3200top/log.h b/drivers/misc/iwmc3200top/log.h
index aba8121f978c..4434bb16cea7 100644
--- a/drivers/misc/iwmc3200top/log.h
+++ b/drivers/misc/iwmc3200top/log.h
@@ -37,13 +37,26 @@
#define LOG_SEV_INFO 3
#define LOG_SEV_INFOEX 4
-#define LOG_SEV_FILTER_ALL \
- (BIT(LOG_SEV_CRITICAL) | \
- BIT(LOG_SEV_ERROR) | \
- BIT(LOG_SEV_WARNING) | \
- BIT(LOG_SEV_INFO) | \
+/* Log levels not defined for FW */
+#define LOG_SEV_TRACE 5
+#define LOG_SEV_DUMP 6
+
+#define LOG_SEV_FW_FILTER_ALL \
+ (BIT(LOG_SEV_CRITICAL) | \
+ BIT(LOG_SEV_ERROR) | \
+ BIT(LOG_SEV_WARNING) | \
+ BIT(LOG_SEV_INFO) | \
BIT(LOG_SEV_INFOEX))
+#define LOG_SEV_FILTER_ALL \
+ (BIT(LOG_SEV_CRITICAL) | \
+ BIT(LOG_SEV_ERROR) | \
+ BIT(LOG_SEV_WARNING) | \
+ BIT(LOG_SEV_INFO) | \
+ BIT(LOG_SEV_INFOEX) | \
+ BIT(LOG_SEV_TRACE) | \
+ BIT(LOG_SEV_DUMP))
+
/* log source */
#define LOG_SRC_INIT 0
#define LOG_SRC_DEBUGFS 1
@@ -104,16 +117,16 @@ do { \
__func__, __LINE__, ##args); \
} while (0)
-#define LOG_INFOEX(priv, src, fmt, args...) \
+#define LOG_TRACE(priv, src, fmt, args...) \
do { \
- if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX)) \
+ if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_TRACE)) \
dev_dbg(priv2dev(priv), "%s %d: " fmt, \
__func__, __LINE__, ##args); \
} while (0)
#define LOG_HEXDUMP(src, ptr, len) \
do { \
- if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX)) \
+ if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_DUMP)) \
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, \
16, 1, ptr, len, false); \
} while (0)
@@ -142,7 +155,7 @@ ssize_t store_iwmct_log_level_fw(struct device *d,
#define LOG_ERROR(priv, src, fmt, args...)
#define LOG_WARNING(priv, src, fmt, args...)
#define LOG_INFO(priv, src, fmt, args...)
-#define LOG_INFOEX(priv, src, fmt, args...)
+#define LOG_TRACE(priv, src, fmt, args...)
#define LOG_HEXDUMP(src, ptr, len)
static inline void iwmct_log_top_message(struct iwmct_priv *priv,
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
index fafcaa481d74..dd0a3913bf6d 100644
--- a/drivers/misc/iwmc3200top/main.c
+++ b/drivers/misc/iwmc3200top/main.c
@@ -49,6 +49,20 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRIVER_COPYRIGHT);
MODULE_FIRMWARE(FW_NAME(FW_API_VER));
+
+static inline int __iwmct_tx(struct iwmct_priv *priv, void *src, int count)
+{
+ return sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, src, count);
+
+}
+int iwmct_tx(struct iwmct_priv *priv, void *src, int count)
+{
+ int ret;
+ sdio_claim_host(priv->func);
+ ret = __iwmct_tx(priv, src, count);
+ sdio_release_host(priv->func);
+ return ret;
+}
/*
* This workers main task is to wait for OP_OPR_ALIVE
* from TOP FW until ALIVE_MSG_TIMOUT timeout is elapsed.
@@ -66,7 +80,7 @@ static void iwmct_rescan_worker(struct work_struct *ws)
ret = bus_rescan_devices(priv->func->dev.bus);
if (ret < 0)
- LOG_INFO(priv, FW_DOWNLOAD, "bus_rescan_devices FAILED!!!\n");
+ LOG_INFO(priv, INIT, "bus_rescan_devices FAILED!!!\n");
}
static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg)
@@ -137,7 +151,7 @@ int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
int ret;
u8 *buf;
- LOG_INFOEX(priv, FW_MSG, "Sending hcmd:\n");
+ LOG_TRACE(priv, FW_MSG, "Sending hcmd:\n");
/* add padding to 256 for IWMC */
((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256;
@@ -158,27 +172,12 @@ int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
}
memcpy(buf, cmd, len);
-
- sdio_claim_host(priv->func);
- ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, buf,
- FW_HCMD_BLOCK_SIZE);
- sdio_release_host(priv->func);
+ ret = iwmct_tx(priv, buf, FW_HCMD_BLOCK_SIZE);
kfree(buf);
return ret;
}
-int iwmct_tx(struct iwmct_priv *priv, unsigned int addr,
- void *src, int count)
-{
- int ret;
-
- sdio_claim_host(priv->func);
- ret = sdio_memcpy_toio(priv->func, addr, src, count);
- sdio_release_host(priv->func);
-
- return ret;
-}
static void iwmct_irq_read_worker(struct work_struct *ws)
{
@@ -192,7 +191,7 @@ static void iwmct_irq_read_worker(struct work_struct *ws)
priv = container_of(ws, struct iwmct_priv, isr_worker);
- LOG_INFO(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);
+ LOG_TRACE(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);
/* --------------------- Handshake with device -------------------- */
sdio_claim_host(priv->func);
@@ -273,8 +272,7 @@ static void iwmct_irq_read_worker(struct work_struct *ws)
if (barker & BARKER_DNLOAD_SYNC_MSK) {
/* Send the same barker back */
- ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR,
- buf, iosize);
+ ret = __iwmct_tx(priv, buf, iosize);
if (ret) {
LOG_ERROR(priv, IRQ,
"error %d echoing barker\n", ret);
@@ -292,15 +290,6 @@ static void iwmct_irq_read_worker(struct work_struct *ws)
sdio_release_host(priv->func);
-
- LOG_INFO(priv, IRQ, "barker download request 0x%x is:\n", priv->barker);
- LOG_INFO(priv, IRQ, "******* Top FW %s requested ********\n",
- (priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
- LOG_INFO(priv, IRQ, "******* GPS FW %s requested ********\n",
- (priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
- LOG_INFO(priv, IRQ, "******* BT FW %s requested ********\n",
- (priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");
-
if (priv->dbg.fw_download)
iwmct_fw_load(priv);
else
@@ -312,7 +301,7 @@ exit_release:
sdio_release_host(priv->func);
exit:
kfree(buf);
- LOG_INFO(priv, IRQ, "exit iwmct_irq_read_worker\n");
+ LOG_TRACE(priv, IRQ, "exit iwmct_irq_read_worker\n");
}
static void iwmct_irq(struct sdio_func *func)
@@ -325,12 +314,12 @@ static void iwmct_irq(struct sdio_func *func)
priv = sdio_get_drvdata(func);
- LOG_INFO(priv, IRQ, "enter iwmct_irq\n");
+ LOG_TRACE(priv, IRQ, "enter iwmct_irq\n");
/* read the function's status register */
val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret);
- LOG_INFO(priv, IRQ, "iir value = %d, ret=%d\n", val, ret);
+ LOG_TRACE(priv, IRQ, "iir value = %d, ret=%d\n", val, ret);
if (!val) {
LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n");
@@ -372,7 +361,7 @@ static void iwmct_irq(struct sdio_func *func)
queue_work(priv->wq, &priv->isr_worker);
- LOG_INFO(priv, IRQ, "exit iwmct_irq\n");
+ LOG_TRACE(priv, IRQ, "exit iwmct_irq\n");
return;
@@ -660,7 +649,7 @@ static int __init iwmct_init(void)
/* Default log filter settings */
iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME);
- iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FILTER_ALL);
+ iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FW_FILTER_ALL);
iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME);
rc = sdio_register_driver(&iwmct_driver);
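The iwmct_tx()/__iwmct_tx() pair added near the top of main.c follows the usual locked/unlocked naming convention: the double-underscore variant assumes the caller already holds the SDIO host (as in the IRQ worker, which claims it once around the whole handshake), while the plain variant claims and releases the host around a single transfer. The pair, as introduced by the patch:

    /* caller must already hold the host via sdio_claim_host() */
    static inline int __iwmct_tx(struct iwmct_priv *priv, void *src, int count)
    {
        return sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, src, count);
    }

    int iwmct_tx(struct iwmct_priv *priv, void *src, int count)
    {
        int ret;

        sdio_claim_host(priv->func);
        ret = __iwmct_tx(priv, src, count);
        sdio_release_host(priv->func);
        return ret;
    }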
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index 9257d7ce0378..dadb46a8833a 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -1229,8 +1229,8 @@ static void elp_set_mc_list(struct net_device *dev)
/* send a "load multicast list" command to the board, max 10 addrs/cmd */
/* if num_addrs==0 the list will be cleared */
adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
- adapter->tx_pcb.length = 6 * dev->mc_count;
- for (i = 0; i < dev->mc_count; i++) {
+ adapter->tx_pcb.length = 6 * netdev_mc_count(dev);
+ for (i = 0; i < netdev_mc_count(dev); i++) {
memcpy(adapter->tx_pcb.data.multicast[i], dmi->dmi_addr, 6);
dmi = dmi->next;
}
@@ -1244,7 +1244,7 @@ static void elp_set_mc_list(struct net_device *dev)
TIMEOUT_MSG(__LINE__);
}
}
- if (dev->mc_count)
+ if (!netdev_mc_empty(dev))
adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD | RECV_MULTI;
else /* num_addrs == 0 */
adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 9d85efce5916..902435a76466 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -1111,12 +1111,14 @@ set_multicast_list(struct net_device *dev)
unsigned long flags;
struct el3_private *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
+ int mc_count = netdev_mc_count(dev);
if (el3_debug > 1) {
static int old;
- if (old != dev->mc_count) {
- old = dev->mc_count;
- pr_debug("%s: Setting Rx mode to %d addresses.\n", dev->name, dev->mc_count);
+ if (old != mc_count) {
+ old = mc_count;
+ pr_debug("%s: Setting Rx mode to %d addresses.\n",
+ dev->name, mc_count);
}
}
spin_lock_irqsave(&lp->lock, flags);
@@ -1124,7 +1126,7 @@ set_multicast_list(struct net_device *dev)
outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
ioaddr + EL3_CMD);
}
- else if (dev->mc_count || (dev->flags&IFF_ALLMULTI)) {
+ else if (mc_count || (dev->flags&IFF_ALLMULTI)) {
outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast, ioaddr + EL3_CMD);
}
else
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index 27d80ca5e4c0..6948d667fc5e 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -626,7 +626,7 @@ static int init586(struct net_device *dev)
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
struct dev_mc_list *dmi = dev->mc_list;
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct));
@@ -771,7 +771,7 @@ static int init586(struct net_device *dev)
* Multicast setup
*/
- if (dev->mc_count) {
+ if (num_addrs) {
/* I don't understand this: do we really need memory after the init? */
int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
if (len <= 0) {
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 36c4191e7bca..ce9826980517 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1526,10 +1526,10 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
if ((dev->flags&IFF_PROMISC) ||
(dev->flags&IFF_ALLMULTI) ||
- dev->mc_count > 10)
+ netdev_mc_count(dev) > 10)
/* Enable promiscuous mode */
filt |= 1;
- else if(dev->mc_count)
+ else if (!netdev_mc_empty(dev))
{
unsigned char block[62];
unsigned char *bp;
@@ -1542,16 +1542,17 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
if(!lp->mc_list_valid)
{
block[1]=0;
- block[0]=dev->mc_count;
+ block[0]=netdev_mc_count(dev);
bp=block+2;
- for(i=0;i<dev->mc_count;i++)
+ for(i=0;i<netdev_mc_count(dev);i++)
{
memcpy(bp, dmc->dmi_addr, 6);
bp+=6;
dmc=dmc->next;
}
- if(mc32_command_nowait(dev, 2, block, 2+6*dev->mc_count)==-1)
+ if(mc32_command_nowait(dev, 2, block,
+ 2+6*netdev_mc_count(dev))==-1)
{
lp->mc_reload_wait = 1;
return;
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 39db0e96815d..5df46c230b07 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -375,7 +375,7 @@ static struct vortex_chip_info {
};
-static struct pci_device_id vortex_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(vortex_pci_tbl) = {
{ 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
{ 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
{ 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index b1e5764628c6..079d0be37821 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -611,7 +611,7 @@ static void lance_load_multicast (struct net_device *dev)
ib->filter [1] = 0;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++){
+ for (i = 0; i < netdev_mc_count(dev); i++){
addrs = dmi->dmi_addr;
dmi = dmi->next;
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 3f452bcbfb9e..60bc0b0ad4f3 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -394,7 +394,7 @@ static int cp_get_eeprom(struct net_device *dev,
static int cp_set_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *data);
-static struct pci_device_id cp_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
{ },
@@ -899,7 +899,7 @@ static void __cp_set_rx_mode (struct net_device *dev)
{
struct cp_private *cp = netdev_priv(dev);
u32 mc_filter[2]; /* Multicast hash filter */
- int i, rx_mode;
+ int rx_mode;
u32 tmp;
/* Note: do not reorder, GCC is clever about common statements. */
@@ -909,7 +909,7 @@ static void __cp_set_rx_mode (struct net_device *dev)
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
@@ -918,8 +918,7 @@ static void __cp_set_rx_mode (struct net_device *dev)
struct dev_mc_list *mclist;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
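The multicast changes repeated across the network drivers in this series all move from open-coded dev->mc_list/dev->mc_count walks to the netdev_mc_count()/netdev_mc_empty() helpers and the netdev_for_each_mc_addr() iterator. A minimal sketch of the hash-filter idiom as it appears just above (the final register write is device-specific and only indicated by a comment):

    struct dev_mc_list *mclist;
    u32 mc_filter[2] = { 0, 0 };

    if (netdev_mc_empty(dev))
        return;                         /* nothing to program */

    netdev_for_each_mc_addr(mclist, dev) {
        int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
    }
    /* write mc_filter[0] and mc_filter[1] to the hardware hash registers */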
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 25f7339daabd..c7d6f094cc7a 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -231,7 +231,7 @@ static const struct {
};
-static struct pci_device_id rtl8139_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rtl8139_pci_tbl) = {
{0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
@@ -2509,7 +2509,7 @@ static void __set_rx_mode (struct net_device *dev)
struct rtl8139_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
u32 mc_filter[2]; /* Multicast hash filter */
- int i, rx_mode;
+ int rx_mode;
u32 tmp;
pr_debug("%s: rtl8139_set_rx_mode(%4.4x) done -- Rx config %8.8lx.\n",
@@ -2521,7 +2521,7 @@ static void __set_rx_mode (struct net_device *dev)
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
@@ -2530,8 +2530,7 @@ static void __set_rx_mode (struct net_device *dev)
struct dev_mc_list *mclist;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 1663bc9e45de..638ce3b29854 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -1505,7 +1505,7 @@ static void set_multicast_list(struct net_device *dev)
int config = 0, cnt;
DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
- dev->name, dev->mc_count,
+ dev->name, netdev_mc_count(dev),
dev->flags & IFF_PROMISC ? "ON" : "OFF",
dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
@@ -1533,7 +1533,7 @@ static void set_multicast_list(struct net_device *dev)
i596_add_cmd(dev, &lp->cf_cmd.cmd);
}
- cnt = dev->mc_count;
+ cnt = netdev_mc_count(dev);
if (cnt > MAX_MC_CNT)
{
cnt = MAX_MC_CNT;
@@ -1541,7 +1541,7 @@ static void set_multicast_list(struct net_device *dev)
dev->name, cnt);
}
- if (dev->mc_count > 0) {
+ if (!netdev_mc_empty(dev)) {
struct dev_mc_list *dmi;
unsigned char *cp;
struct mc_cmd *cmd;
@@ -1550,7 +1550,7 @@ static void set_multicast_list(struct net_device *dev)
return;
cmd = &lp->mc_cmd;
cmd->cmd.command = CmdMulticastList;
- cmd->mc_cnt = dev->mc_count * 6;
+ cmd->mc_cnt = netdev_mc_count(dev) * 6;
cp = cmd->mc_addrs;
for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
memcpy(cp, dmi->dmi_addr, 6);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index dd9a09c72dff..069057796bd7 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -90,6 +90,18 @@ config MACVLAN
To compile this driver as a module, choose M here: the module
will be called macvlan.
+config MACVTAP
+ tristate "MAC-VLAN based tap driver (EXPERIMENTAL)"
+ depends on MACVLAN
+ help
+ This adds a specialized tap character device driver that is based
+ on the MAC-VLAN network interface, called macvtap. A macvtap device
+ can be added in the same way as a macvlan device, using 'type
+ macvlan', and then be accessed through the tap user space interface.
+
+ To compile this driver as a module, choose M here: the module
+ will be called macvtap.
+
config EQUALIZER
tristate "EQL (serial line load balancing) support"
---help---
@@ -868,8 +880,8 @@ config BFIN_RX_DESC_NUM
Set the number of buffer packets used in driver.
config BFIN_MAC_RMII
- bool "RMII PHY Interface (EXPERIMENTAL)"
- depends on BFIN_MAC && EXPERIMENTAL
+ bool "RMII PHY Interface"
+ depends on BFIN_MAC
default y if BFIN527_EZKIT
default n if BFIN537_STAMP
help
@@ -1368,6 +1380,17 @@ config AC3200
To compile this driver as a module, choose M here. The module
will be called ac3200.
+config KSZ884X_PCI
+ tristate "Micrel KSZ8841/2 PCI"
+ depends on NET_PCI && PCI
+ select MII
+ select CRC32
+ help
+ This PCI driver is for Micrel KSZ8841/KSZ8842 PCI Ethernet chip.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ksz884x.
+
config APRICOT
tristate "Apricot Xen-II on board Ethernet"
depends on NET_PCI && ISA
@@ -1883,7 +1906,8 @@ config 68360_ENET
config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
- depends on M523x || M527x || M5272 || M528x || M520x || M532x || MACH_MX27 || ARCH_MX35 || ARCH_MX25
+ depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
+ MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
@@ -1939,6 +1963,7 @@ config ATL2
config XILINX_EMACLITE
tristate "Xilinx 10/100 Ethernet Lite support"
depends on PPC32 || MICROBLAZE
+ select PHYLIB
help
This driver supports the 10/100 Ethernet Lite from Xilinx.
@@ -2356,20 +2381,6 @@ config GELIC_WIRELESS
the driver automatically distinguishes the models, you can
safely enable this option even if you have a wireless-less model.
-config GELIC_WIRELESS_OLD_PSK_INTERFACE
- bool "PS3 Wireless private PSK interface (OBSOLETE)"
- depends on GELIC_WIRELESS
- select WEXT_PRIV
- help
- This option retains the obsolete private interface to pass
- the PSK from user space programs to the driver. The PSK
- stands for 'Pre Shared Key' and is used for WPA[2]-PSK
- (WPA-Personal) environment.
- If WPA[2]-PSK is used and you need to use old programs that
- support only this old interface, say Y. Otherwise N.
-
- If unsure, say N.
-
config FSL_PQ_MDIO
tristate "Freescale PQ MDIO"
depends on FSL_SOC
@@ -2618,6 +2629,28 @@ config IXGBE_DCB
If unsure, say N.
+config IXGBEVF
+ tristate "Intel(R) 82599 Virtual Function Ethernet support"
+ depends on PCI_MSI
+ ---help---
+ This driver supports Intel(R) 82599 virtual functions. For more
+ information on how to identify your adapter, go to the Adapter &
+ Driver ID Guide at:
+
+ <http://support.intel.com/support/network/sb/CS-008441.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/ixgbevf.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ixgbevf. MSI-X interrupt support is required
+ for this driver to work correctly.
+
config IXGB
tristate "Intel(R) PRO/10GbE support"
depends on PCI
@@ -2756,6 +2789,13 @@ config BNX2X
To compile this driver as a module, choose M here: the module
will be called bnx2x. This is recommended.
+config QLCNIC
+ tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support"
+ depends on PCI
+ help
+ This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet
+ devices.
+
config QLGE
tristate "QLogic QLGE 10Gb Ethernet Driver Support"
depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index ad1346dd9da9..622cfd450d48 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/
obj-$(CONFIG_IGB) += igb/
obj-$(CONFIG_IGBVF) += igbvf/
obj-$(CONFIG_IXGBE) += ixgbe/
+obj-$(CONFIG_IXGBEVF) += ixgbevf/
obj-$(CONFIG_IXGB) += ixgb/
obj-$(CONFIG_IP1000) += ipg.o
obj-$(CONFIG_CHELSIO_T1) += chelsio/
@@ -95,6 +96,7 @@ obj-$(CONFIG_SKFP) += skfp/
obj-$(CONFIG_KS8842) += ks8842.o
obj-$(CONFIG_KS8851) += ks8851.o
obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
+obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o
obj-$(CONFIG_VIA_RHINE) += via-rhine.o
obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
@@ -148,6 +150,7 @@ ll_temac-objs := ll_temac_main.o ll_temac_mdio.o
obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o
obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o
obj-$(CONFIG_QLA3XXX) += qla3xxx.o
+obj-$(CONFIG_QLCNIC) += qlcnic/
obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_PPP) += ppp_generic.o
@@ -167,6 +170,7 @@ obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_MACVLAN) += macvlan.o
+obj-$(CONFIG_MACVTAP) += macvtap.o
obj-$(CONFIG_DE600) += de600.o
obj-$(CONFIG_DE620) += de620.o
obj-$(CONFIG_LANCE) += lance.o
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index b7ec0368d7e8..6a65f660c192 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -619,7 +619,7 @@ static void lance_load_multicast (struct net_device *dev)
ib->filter [1] = 0;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++){
+ for (i = 0; i < netdev_mc_count(dev); i++){
addrs = dmi->dmi_addr;
dmi = dmi->next;
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index d82a9a994753..4ae750ef1e10 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -134,7 +134,7 @@
#define PCI_DEVICE_ID_SGI_ACENIC 0x0009
#endif
-static struct pci_device_id acenic_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(acenic_pci_tbl) = {
{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
@@ -2845,7 +2845,7 @@ static void ace_set_multicast_list(struct net_device *dev)
* set the entire multicast list at a time and keeping track of
* it here is going to be messy.
*/
- if ((dev->mc_count) && !(ap->mcast_all)) {
+ if (!netdev_mc_empty(dev) && !ap->mcast_all) {
cmd.evt = C_SET_MULTICAST_MODE;
cmd.code = C_C_MCAST_ENABLE;
cmd.idx = 0;
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 766aabfdfc75..bdffdfb4c88b 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -113,7 +113,7 @@ MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0
module_param_array(dynamic_ipg, bool, NULL, 0);
MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
-static struct pci_device_id amd8111e_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = {
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
@@ -1176,8 +1176,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
/* Schedule a polling routine */
__napi_schedule(&lp->napi);
} else if (intren0 & RINTEN0) {
- printk("************Driver bug! \
- interrupt while in poll\n");
+ printk("************Driver bug! interrupt while in poll\n");
/* Fix by disable receive interrupts */
writel(RINTEN0, mmio + INTEN0);
}
@@ -1388,7 +1387,8 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
}
else
writel( PROM, lp->mmio + CMD2);
- if(dev->flags & IFF_ALLMULTI || dev->mc_count > MAX_FILTER_SIZE){
+ if (dev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(dev) > MAX_FILTER_SIZE) {
/* get all multicast packet */
mc_filter[1] = mc_filter[0] = 0xffffffff;
lp->mc_list = dev->mc_list;
@@ -1396,7 +1396,7 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
return;
}
- if( dev->mc_count == 0 ){
+ if (netdev_mc_empty(dev)) {
/* get only own packets */
mc_filter[1] = mc_filter[0] = 0;
lp->mc_list = NULL;
@@ -1410,7 +1410,7 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
lp->options |= OPTION_MULTICAST_ENABLE;
lp->mc_list = dev->mc_list;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count;
+ for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < netdev_mc_count(dev);
i++, mc_ptr = mc_ptr->next) {
bit_num = (ether_crc_le(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f;
mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index dbf4de39754d..b68e1eb405ff 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -144,7 +144,7 @@ static void __devexit com20020pci_remove(struct pci_dev *pdev)
free_netdev(dev);
}
-static struct pci_device_id com20020pci_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(com20020pci_id_table) = {
{ 0x1571, 0xa001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1571, 0xa002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1571, 0xa003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index c35af3e106b1..08d8be47dae0 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -123,9 +123,7 @@ static void ariadne_reset(struct net_device *dev);
static irqreturn_t ariadne_interrupt(int irq, void *data);
static int ariadne_close(struct net_device *dev);
static struct net_device_stats *ariadne_get_stats(struct net_device *dev);
-#ifdef HAVE_MULTICAST
static void set_multicast_list(struct net_device *dev);
-#endif
static void memcpyw(volatile u_short *dest, u_short *src, int len)
@@ -821,7 +819,7 @@ static void set_multicast_list(struct net_device *dev)
lance->RDP = PROM; /* Set promiscuous mode */
} else {
short multicast_table[4];
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
int i;
/* We don't use the multicast table, but rely on upper-layer filtering. */
memset(multicast_table, (num_addrs == 0) ? 0 : -1,
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 164b37e85eea..1c3c1f94268e 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -680,7 +680,7 @@ static const struct net_device_ops am79c961_netdev_ops = {
#endif
};
-static int __init am79c961_probe(struct platform_device *pdev)
+static int __devinit am79c961_probe(struct platform_device *pdev)
{
struct resource *res;
struct net_device *dev;
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index c8bc60a7040c..17d85d98987d 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -563,7 +563,7 @@ static void at91ether_sethashtable(struct net_device *dev)
mc_filter[0] = mc_filter[1] = 0;
curr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, curr = curr->next) {
+ for (i = 0; i < netdev_mc_count(dev); i++, curr = curr->next) {
if (!curr) break; /* unexpected end of list */
bitnr = hash_get_index(curr->dmi_addr);
@@ -592,7 +592,7 @@ static void at91ether_set_multicast_list(struct net_device *dev)
at91_emac_write(AT91_EMAC_HSH, -1);
at91_emac_write(AT91_EMAC_HSL, -1);
cfg |= AT91_EMAC_MTI;
- } else if (dev->mc_count > 0) { /* Enable specific multicasts */
+ } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */
at91ether_sethashtable(dev);
cfg |= AT91_EMAC_MTI;
} else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index b25467ac895c..bf72d57a0afd 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -9,6 +9,8 @@
* (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -20,9 +22,9 @@
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
-#include <mach/ep93xx-regs.h>
-#include <mach/platform.h>
-#include <asm/io.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
#define DRV_MODULE_NAME "ep93xx-eth"
#define DRV_MODULE_VERSION "0.1"
@@ -185,7 +187,47 @@ struct ep93xx_priv
#define wrw(ep, off, val) __raw_writew((val), (ep)->base_addr + (off))
#define wrl(ep, off, val) __raw_writel((val), (ep)->base_addr + (off))
-static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg);
+static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ int data;
+ int i;
+
+ wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);
+
+ for (i = 0; i < 10; i++) {
+ if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+
+ if (i == 10) {
+ pr_info("mdio read timed out\n");
+ data = 0xffff;
+ } else {
+ data = rdl(ep, REG_MIIDATA);
+ }
+
+ return data;
+}
+
+static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ int i;
+
+ wrl(ep, REG_MIIDATA, data);
+ wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);
+
+ for (i = 0; i < 10; i++) {
+ if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+
+ if (i == 10)
+ pr_info("mdio write timed out\n");
+}
static struct net_device_stats *ep93xx_get_stats(struct net_device *dev)
{
@@ -217,14 +259,11 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
rstat->rstat1 = 0;
if (!(rstat0 & RSTAT0_EOF))
- printk(KERN_CRIT "ep93xx_rx: not end-of-frame "
- " %.8x %.8x\n", rstat0, rstat1);
+ pr_crit("not end-of-frame %.8x %.8x\n", rstat0, rstat1);
if (!(rstat0 & RSTAT0_EOB))
- printk(KERN_CRIT "ep93xx_rx: not end-of-buffer "
- " %.8x %.8x\n", rstat0, rstat1);
+ pr_crit("not end-of-buffer %.8x %.8x\n", rstat0, rstat1);
if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry)
- printk(KERN_CRIT "ep93xx_rx: entry mismatch "
- " %.8x %.8x\n", rstat0, rstat1);
+ pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1);
if (!(rstat0 & RSTAT0_RWE)) {
ep->stats.rx_errors++;
@@ -241,8 +280,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
length = rstat1 & RSTAT1_FRAME_LENGTH;
if (length > MAX_PKT_SIZE) {
- printk(KERN_NOTICE "ep93xx_rx: invalid length "
- " %.8x %.8x\n", rstat0, rstat1);
+ pr_notice("invalid length %.8x %.8x\n", rstat0, rstat1);
goto err;
}
@@ -371,11 +409,9 @@ static void ep93xx_tx_complete(struct net_device *dev)
tstat->tstat0 = 0;
if (tstat0 & TSTAT0_FA)
- printk(KERN_CRIT "ep93xx_tx_complete: frame aborted "
- " %.8x\n", tstat0);
+ pr_crit("frame aborted %.8x\n", tstat0);
if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry)
- printk(KERN_CRIT "ep93xx_tx_complete: entry mismatch "
- " %.8x\n", tstat0);
+ pr_crit("entry mismatch %.8x\n", tstat0);
if (tstat0 & TSTAT0_TXWE) {
int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;
@@ -536,7 +572,7 @@ static int ep93xx_start_hw(struct net_device *dev)
}
if (i == 10) {
- printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to reset\n");
+ pr_crit("hw failed to reset\n");
return 1;
}
@@ -581,7 +617,7 @@ static int ep93xx_start_hw(struct net_device *dev)
}
if (i == 10) {
- printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to start\n");
+ pr_crit("hw failed to start\n");
return 1;
}
@@ -617,7 +653,7 @@ static void ep93xx_stop_hw(struct net_device *dev)
}
if (i == 10)
- printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to reset\n");
+ pr_crit("hw failed to reset\n");
}
static int ep93xx_open(struct net_device *dev)
@@ -681,48 +717,6 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return generic_mii_ioctl(&ep->mii, data, cmd, NULL);
}
-static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
-{
- struct ep93xx_priv *ep = netdev_priv(dev);
- int data;
- int i;
-
- wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);
-
- for (i = 0; i < 10; i++) {
- if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
- break;
- msleep(1);
- }
-
- if (i == 10) {
- printk(KERN_INFO DRV_MODULE_NAME ": mdio read timed out\n");
- data = 0xffff;
- } else {
- data = rdl(ep, REG_MIIDATA);
- }
-
- return data;
-}
-
-static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
-{
- struct ep93xx_priv *ep = netdev_priv(dev);
- int i;
-
- wrl(ep, REG_MIIDATA, data);
- wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);
-
- for (i = 0; i < 10; i++) {
- if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
- break;
- msleep(1);
- }
-
- if (i == 10)
- printk(KERN_INFO DRV_MODULE_NAME ": mdio write timed out\n");
-}
-
static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strcpy(info->driver, DRV_MODULE_NAME);
@@ -825,12 +819,19 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
struct ep93xx_eth_data *data;
struct net_device *dev;
struct ep93xx_priv *ep;
+ struct resource *mem;
+ int irq;
int err;
if (pdev == NULL)
return -ENODEV;
data = pdev->dev.platform_data;
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!mem || irq < 0)
+ return -ENXIO;
+
dev = ep93xx_dev_alloc(data);
if (dev == NULL) {
err = -ENOMEM;
@@ -842,23 +843,21 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
- ep->res = request_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1,
- dev_name(&pdev->dev));
+ ep->res = request_mem_region(mem->start, resource_size(mem),
+ dev_name(&pdev->dev));
if (ep->res == NULL) {
dev_err(&pdev->dev, "Could not reserve memory region\n");
err = -ENOMEM;
goto err_out;
}
- ep->base_addr = ioremap(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start);
+ ep->base_addr = ioremap(mem->start, resource_size(mem));
if (ep->base_addr == NULL) {
dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
err = -EIO;
goto err_out;
}
- ep->irq = pdev->resource[1].start;
+ ep->irq = irq;
ep->mii.phy_id = data->phy_id;
ep->mii.phy_id_mask = 0x1f;
@@ -877,11 +876,8 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
goto err_out;
}
- printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, "
- "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", dev->name,
- ep->irq, data->dev_addr[0], data->dev_addr[1],
- data->dev_addr[2], data->dev_addr[3],
- data->dev_addr[4], data->dev_addr[5]);
+ printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, %pM\n",
+ dev->name, ep->irq, dev->dev_addr);
return 0;
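The ep93xx cleanup defines pr_fmt before any include so that every pr_crit()/pr_notice()/pr_info() call in the file is automatically prefixed with the module name and the calling function, which is why the hand-written "ep93xx_rx: ..." style prefixes could be dropped from the messages. In sketch form:

    #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

    #include <linux/kernel.h>

    /* pr_info("mdio read timed out\n") now expands to something like:
     * printk(KERN_INFO "ep93xx_eth" ":%s: " "mdio read timed out\n", __func__)
     */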
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index 1f7a69c929a6..d9de9bce2395 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -463,7 +463,7 @@ static void ether3_setmulticastlist(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
/* promiscuous mode */
priv(dev)->regs.config1 |= CFG1_RECVPROMISC;
- } else if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+ } else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI;
} else
priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD;
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index c3dfbdd2cdcf..1a5f78b160f9 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -737,7 +737,7 @@ static void eth_set_mcast_list(struct net_device *dev)
struct port *port = netdev_priv(dev);
struct dev_mc_list *mclist = dev->mc_list;
u8 diffs[ETH_ALEN], *addr;
- int cnt = dev->mc_count, i;
+ int cnt = netdev_mc_count(dev), i;
if ((dev->flags & IFF_PROMISC) || !mclist || !cnt) {
__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index be256b34cea8..1dc181a9fbc3 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -1207,7 +1207,7 @@ ks8695_set_multicast(struct net_device *ndev)
if (ndev->flags & IFF_ALLMULTI) {
/* enable all multicast mode */
ctrl |= DRXC_RM;
- } else if (ndev->mc_count > KS8695_NR_ADDRESSES) {
+ } else if (netdev_mc_count(ndev) > KS8695_NR_ADDRESSES) {
/* more specific multicast addresses than can be
* handled in hardware
*/
@@ -1216,7 +1216,7 @@ ks8695_set_multicast(struct net_device *ndev)
/* enable specific multicasts */
ctrl &= ~DRXC_RM;
ks8695_init_partial_multicast(ksp, ndev->mc_list,
- ndev->mc_count);
+ netdev_mc_count(ndev));
}
ks8695_writereg(ksp, KS8695_DRXC, ctrl);
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index b14f4799d5d1..fe60cd02c86c 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -839,12 +839,12 @@ set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
memset(mc_filter, 0xff, sizeof(mc_filter));
outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
- } else if (dev->mc_count > MC_FILTERBREAK ||
+ } else if (netdev_mc_count(dev) > MC_FILTERBREAK ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
outb(2, ioaddr + RX_MODE); /* Use normal mode. */
- } else if (dev->mc_count == 0) {
+ } else if (netdev_mc_empty(dev)) {
memset(mc_filter, 0x00, sizeof(mc_filter));
outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
} else {
@@ -852,7 +852,7 @@ set_rx_mode(struct net_device *dev)
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
unsigned int bit =
ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index cc9ed8643910..280cfff48b49 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -1097,7 +1097,7 @@ static void set_multicast_list( struct net_device *dev )
REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
} else {
short multicast_table[4];
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
int i;
/* We don't use the multicast table, but rely on upper-layer
* filtering. */
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index efe5435bc3d3..84ae905bf732 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -313,6 +313,9 @@ enum atl1c_rss_type {
enum atl1c_nic_type {
athr_l1c = 0,
athr_l2c = 1,
+ athr_l2c_b,
+ athr_l2c_b2,
+ athr_l1d,
};
enum atl1c_trans_queue {
@@ -426,8 +429,12 @@ struct atl1c_hw {
#define ATL1C_ASPM_L1_SUPPORT 0x0100
#define ATL1C_ASPM_CTRL_MON 0x0200
#define ATL1C_HIB_DISABLE 0x0400
-#define ATL1C_LINK_CAP_1000M 0x0800
-#define ATL1C_FPGA_VERSION 0x8000
+#define ATL1C_APS_MODE_ENABLE 0x0800
+#define ATL1C_LINK_EXT_SYNC 0x1000
+#define ATL1C_CLK_GATING_EN 0x2000
+#define ATL1C_FPGA_VERSION 0x8000
+ u16 link_cap_flags;
+#define ATL1C_LINK_CAP_1000M 0x0001
u16 cmb_tpd;
u16 cmb_rrd;
u16 cmb_rx_timer; /* 2us resolution */
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index 9b1e0eaebb5c..61a0f2ff11e9 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -37,7 +37,7 @@ static int atl1c_get_settings(struct net_device *netdev,
SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_TP);
- if (hw->ctrl_flags & ATL1C_LINK_CAP_1000M)
+ if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M)
ecmd->supported |= SUPPORTED_1000baseT_Full;
ecmd->advertising = ADVERTISED_TP;
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 3e69b940b8f7..f1389d664a21 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -70,17 +70,39 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
u32 otp_ctrl_data;
u32 twsi_ctrl_data;
u8 eth_addr[ETH_ALEN];
+ u16 phy_data;
+ bool raise_vol = false;
/* init */
addr[0] = addr[1] = 0;
AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
if (atl1c_check_eeprom_exist(hw)) {
- /* Enable OTP CLK */
- if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) {
- otp_ctrl_data |= OTP_CTRL_CLK_EN;
- AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
- AT_WRITE_FLUSH(hw);
- msleep(1);
+ if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b) {
+ /* Enable OTP CLK */
+ if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) {
+ otp_ctrl_data |= OTP_CTRL_CLK_EN;
+ AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
+ AT_WRITE_FLUSH(hw);
+ msleep(1);
+ }
+ }
+
+ if (hw->nic_type == athr_l2c_b ||
+ hw->nic_type == athr_l2c_b2 ||
+ hw->nic_type == athr_l1d) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00);
+ if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
+ goto out;
+ phy_data &= 0xFF7F;
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
+
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
+ if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
+ goto out;
+ phy_data |= 0x8;
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
+ udelay(20);
+ raise_vol = true;
}
AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
@@ -96,11 +118,31 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
return -1;
}
/* Disable OTP_CLK */
- if (otp_ctrl_data & OTP_CTRL_CLK_EN) {
- otp_ctrl_data &= ~OTP_CTRL_CLK_EN;
- AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
- AT_WRITE_FLUSH(hw);
- msleep(1);
+ if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) {
+ if (otp_ctrl_data & OTP_CTRL_CLK_EN) {
+ otp_ctrl_data &= ~OTP_CTRL_CLK_EN;
+ AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
+ AT_WRITE_FLUSH(hw);
+ msleep(1);
+ }
+ }
+ if (raise_vol) {
+ if (hw->nic_type == athr_l2c_b ||
+ hw->nic_type == athr_l2c_b2 ||
+ hw->nic_type == athr_l1d) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00);
+ if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
+ goto out;
+ phy_data |= 0x80;
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
+
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
+ if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
+ goto out;
+ phy_data &= 0xFFF7;
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
+ udelay(20);
+ }
}
/* maybe MAC-address is from BIOS */
@@ -114,6 +156,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
return 0;
}
+out:
return -1;
}
@@ -307,7 +350,7 @@ static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
mii_adv_data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
ADVERTISE_100HALF | ADVERTISE_100FULL;
- if (hw->ctrl_flags & ATL1C_LINK_CAP_1000M) {
+ if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M) {
if (hw->autoneg_advertised & ADVERTISED_1000baseT_Half)
mii_giga_ctrl_data |= ADVERTISE_1000HALF;
if (hw->autoneg_advertised & ADVERTISED_1000baseT_Full)
@@ -389,6 +432,7 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
{
struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
+ u16 phy_data;
u32 phy_ctrl_data = GPHY_CTRL_DEFAULT;
u32 mii_ier_data = IER_LINK_UP | IER_LINK_DOWN;
int err;
@@ -404,6 +448,21 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
AT_WRITE_FLUSH(hw);
msleep(10);
+ if (hw->nic_type == athr_l2c_b) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x0A);
+ atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data);
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xDFFF);
+ }
+
+ if (hw->nic_type == athr_l2c_b ||
+ hw->nic_type == athr_l2c_b2 ||
+ hw->nic_type == athr_l1d) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
+ atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data);
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7);
+ msleep(20);
+ }
+
/*Enable PHY LinkChange Interrupt */
err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data);
if (err) {
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h
index c2c738df5c63..1eeb3ed9f0cb 100644
--- a/drivers/net/atl1c/atl1c_hw.h
+++ b/drivers/net/atl1c/atl1c_hw.h
@@ -57,6 +57,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
#define REG_LINK_CTRL 0x68
#define LINK_CTRL_L0S_EN 0x01
#define LINK_CTRL_L1_EN 0x02
+#define LINK_CTRL_EXT_SYNC 0x80
#define REG_VPD_CAP 0x6C
#define VPD_CAP_ID_MASK 0xff
@@ -156,6 +157,8 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
#define PM_CTRL_PM_REQ_TIMER_SHIFT 20
#define PM_CTRL_LCKDET_TIMER_MASK 0x3F
#define PM_CTRL_LCKDET_TIMER_SHIFT 24
+#define PM_CTRL_EN_BUFS_RX_L0S 0x10000000
+#define PM_CTRL_SA_DLY_EN 0x20000000
#define PM_CTRL_MAC_ASPM_CHK 0x40000000
#define PM_CTRL_HOTRST 0x80000000
@@ -314,6 +317,8 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
#define MAC_CTRL_BC_EN 0x4000000
#define MAC_CTRL_DBG 0x8000000
#define MAC_CTRL_SINGLE_PAUSE_EN 0x10000000
+#define MAC_CTRL_HASH_ALG_CRC32 0x20000000
+#define MAC_CTRL_SPEED_MODE_SW 0x40000000
/* MAC IPG/IFG Control Register */
#define REG_MAC_IPG_IFG 0x1484
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 2f4be59b9c0b..3d4c0a5a77eb 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -21,11 +21,18 @@
#include "atl1c.h"
-#define ATL1C_DRV_VERSION "1.0.0.1-NAPI"
+#define ATL1C_DRV_VERSION "1.0.0.2-NAPI"
char atl1c_driver_name[] = "atl1c";
char atl1c_driver_version[] = ATL1C_DRV_VERSION;
#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062
#define PCI_DEVICE_ID_ATTANSIC_L1C 0x1063
+#define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */
+#define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */
+#define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */
+
+#define L2CB_V10 0xc0
+#define L2CB_V11 0xc1
+
/*
* atl1c_pci_tbl - PCI Device ID Table
*
@@ -35,9 +42,12 @@ char atl1c_driver_version[] = ATL1C_DRV_VERSION;
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id atl1c_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
/* required last entry */
{ 0 }
};
@@ -593,11 +603,18 @@ static void atl1c_set_mac_type(struct atl1c_hw *hw)
case PCI_DEVICE_ID_ATTANSIC_L2C:
hw->nic_type = athr_l2c;
break;
-
case PCI_DEVICE_ID_ATTANSIC_L1C:
hw->nic_type = athr_l1c;
break;
-
+ case PCI_DEVICE_ID_ATHEROS_L2C_B:
+ hw->nic_type = athr_l2c_b;
+ break;
+ case PCI_DEVICE_ID_ATHEROS_L2C_B2:
+ hw->nic_type = athr_l2c_b2;
+ break;
+ case PCI_DEVICE_ID_ATHEROS_L1D:
+ hw->nic_type = athr_l1d;
+ break;
default:
break;
}
@@ -620,10 +637,13 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT;
if (link_ctrl_data & LINK_CTRL_L1_EN)
hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT;
+ if (link_ctrl_data & LINK_CTRL_EXT_SYNC)
+ hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC;
- if (hw->nic_type == athr_l1c) {
+ if (hw->nic_type == athr_l1c ||
+ hw->nic_type == athr_l1d) {
hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
- hw->ctrl_flags |= ATL1C_LINK_CAP_1000M;
+ hw->link_cap_flags |= ATL1C_LINK_CAP_1000M;
}
return 0;
}
@@ -1234,21 +1254,92 @@ static void atl1c_disable_l0s_l1(struct atl1c_hw *hw)
static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
{
u32 pm_ctrl_data;
+ u32 link_ctrl_data;
AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
-
+ AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
+
pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
PM_CTRL_L1_ENTRY_TIMER_SHIFT);
+ pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK <<
+ PM_CTRL_LCKDET_TIMER_SHIFT);
pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
+ pm_ctrl_data |= PM_CTRL_RBER_EN;
+ pm_ctrl_data |= PM_CTRL_SDES_EN;
+
+ if (hw->nic_type == athr_l2c_b ||
+ hw->nic_type == athr_l1d ||
+ hw->nic_type == athr_l2c_b2) {
+ link_ctrl_data &= ~LINK_CTRL_EXT_SYNC;
+ if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) {
+ if (hw->nic_type == athr_l2c_b &&
+ hw->revision_id == L2CB_V10)
+ link_ctrl_data |= LINK_CTRL_EXT_SYNC;
+ }
+
+ AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data);
+
+ pm_ctrl_data |= PM_CTRL_PCIE_RECV;
+ pm_ctrl_data |= AT_ASPM_L1_TIMER << PM_CTRL_PM_REQ_TIMER_SHIFT;
+ pm_ctrl_data &= ~PM_CTRL_EN_BUFS_RX_L0S;
+ pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN;
+ pm_ctrl_data &= ~PM_CTRL_HOTRST;
+ pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT;
+ pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1;
+ }
if (linkup) {
- pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
- pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
+ if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
+ pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
+ if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
+ pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN;
+
+ if (hw->nic_type == athr_l2c_b ||
+ hw->nic_type == athr_l1d ||
+ hw->nic_type == athr_l2c_b2) {
+ if (hw->nic_type == athr_l2c_b)
+ if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE))
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
+ pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
+ pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
+ pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
+ pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
+ if (hw->adapter->link_speed == SPEED_100 ||
+ hw->adapter->link_speed == SPEED_1000) {
+ pm_ctrl_data &=
+ ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
+ PM_CTRL_L1_ENTRY_TIMER_SHIFT);
+ if (hw->nic_type == athr_l1d)
+ pm_ctrl_data |= 0xF <<
+ PM_CTRL_L1_ENTRY_TIMER_SHIFT;
+ else
+ pm_ctrl_data |= 7 <<
+ PM_CTRL_L1_ENTRY_TIMER_SHIFT;
+ }
+ } else {
+ pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
+ pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
+ pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
+ pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
+ }
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
+ if (hw->adapter->link_speed == SPEED_10)
+ if (hw->nic_type == athr_l1d)
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB69D);
+ else
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD);
+ else if (hw->adapter->link_speed == SPEED_100)
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB2DD);
+ else
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x96DD);
- pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
- pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
} else {
pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
@@ -1302,6 +1393,10 @@ static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN;
+ if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2) {
+ mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW;
+ mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32;
+ }
AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
}
@@ -2596,11 +2691,8 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
if (netif_msg_probe(adapter))
- dev_dbg(&pdev->dev,
- "mac address : %02x-%02x-%02x-%02x-%02x-%02x\n",
- adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
- adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
- adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
+ dev_dbg(&pdev->dev, "mac address : %pM\n",
+ adapter->hw.mac_addr);
atl1c_hw_set_mac_addr(&adapter->hw);
INIT_WORK(&adapter->common_task, atl1c_common_task);
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c
index 4a7700620119..76cc043def8c 100644
--- a/drivers/net/atl1e/atl1e_hw.c
+++ b/drivers/net/atl1e/atl1e_hw.c
@@ -394,7 +394,6 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
int atl1e_phy_commit(struct atl1e_hw *hw)
{
struct atl1e_adapter *adapter = hw->adapter;
- struct pci_dev *pdev = adapter->pdev;
int ret_val;
u16 phy_data;
@@ -415,12 +414,12 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
}
if (0 != (val & (MDIO_START | MDIO_BUSY))) {
- dev_err(&pdev->dev,
- "pcie linkdown at least for 25ms\n");
+ netdev_err(adapter->netdev,
+ "pcie linkdown at least for 25ms\n");
return ret_val;
}
- dev_err(&pdev->dev, "pcie linkup after %d ms\n", i);
+ netdev_err(adapter->netdev, "pcie linkup after %d ms\n", i);
}
return 0;
}
@@ -428,7 +427,6 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
int atl1e_phy_init(struct atl1e_hw *hw)
{
struct atl1e_adapter *adapter = hw->adapter;
- struct pci_dev *pdev = adapter->pdev;
s32 ret_val;
u16 phy_val;
@@ -492,20 +490,22 @@ int atl1e_phy_init(struct atl1e_hw *hw)
/*Enable PHY LinkChange Interrupt */
ret_val = atl1e_write_phy_reg(hw, MII_INT_CTRL, 0xC00);
if (ret_val) {
- dev_err(&pdev->dev, "Error enable PHY linkChange Interrupt\n");
+ netdev_err(adapter->netdev,
+ "Error enable PHY linkChange Interrupt\n");
return ret_val;
}
/* setup AutoNeg parameters */
ret_val = atl1e_phy_setup_autoneg_adv(hw);
if (ret_val) {
- dev_err(&pdev->dev, "Error Setting up Auto-Negotiation\n");
+ netdev_err(adapter->netdev,
+ "Error Setting up Auto-Negotiation\n");
return ret_val;
}
/* SW.Reset & En-Auto-Neg to restart Auto-Neg*/
- dev_dbg(&pdev->dev, "Restarting Auto-Neg");
+ netdev_dbg(adapter->netdev, "Restarting Auto-Negotiation\n");
ret_val = atl1e_phy_commit(hw);
if (ret_val) {
- dev_err(&pdev->dev, "Error Resetting the phy");
+ netdev_err(adapter->netdev, "Error resetting the phy\n");
return ret_val;
}
@@ -559,9 +559,8 @@ int atl1e_reset_hw(struct atl1e_hw *hw)
}
if (timeout >= AT_HW_MAX_IDLE_DELAY) {
- dev_err(&pdev->dev,
- "MAC state machine cann't be idle since"
- " disabled for 10ms second\n");
+ netdev_err(adapter->netdev,
+ "MAC state machine can't be idle since disabled for 10ms second\n");
return AT_ERR_TIMEOUT;
}
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 08f8c0969e9b..7d8de10ba628 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -35,7 +35,7 @@ char atl1e_driver_version[] = DRV_VERSION;
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id atl1e_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(atl1e_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)},
/* required last entry */
@@ -164,11 +164,10 @@ static int atl1e_check_link(struct atl1e_adapter *adapter)
{
struct atl1e_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
int err = 0;
u16 speed, duplex, phy_data;
- /* MII_BMSR must read twise */
+ /* MII_BMSR must be read twice */
atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
if ((phy_data & BMSR_LSTATUS) == 0) {
@@ -195,12 +194,11 @@ static int atl1e_check_link(struct atl1e_adapter *adapter)
adapter->link_speed = speed;
adapter->link_duplex = duplex;
atl1e_setup_mac_ctrl(adapter);
- dev_info(&pdev->dev,
- "%s: %s NIC Link is Up<%d Mbps %s>\n",
- atl1e_driver_name, netdev->name,
- adapter->link_speed,
- adapter->link_duplex == FULL_DUPLEX ?
- "Full Duplex" : "Half Duplex");
+ netdev_info(netdev,
+ "NIC Link is Up <%d Mbps %s Duplex>\n",
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full" : "Half");
}
if (!netif_carrier_ok(netdev)) {
@@ -230,7 +228,6 @@ static void atl1e_link_chg_task(struct work_struct *work)
static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
u16 phy_data = 0;
u16 link_up = 0;
@@ -243,8 +240,7 @@ static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
if (!link_up) {
if (netif_carrier_ok(netdev)) {
/* old link state: Up */
- dev_info(&pdev->dev, "%s: %s NIC Link is Down\n",
- atl1e_driver_name, netdev->name);
+ netdev_info(netdev, "NIC Link is Down\n");
adapter->link_speed = SPEED_0;
netif_stop_queue(netdev);
}
@@ -321,10 +317,9 @@ static void atl1e_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
u32 mac_ctrl_data = 0;
- dev_dbg(&pdev->dev, "atl1e_vlan_rx_register\n");
+ netdev_dbg(adapter->netdev, "%s\n", __func__);
atl1e_irq_disable(adapter);
@@ -345,9 +340,7 @@ static void atl1e_vlan_rx_register(struct net_device *netdev,
static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
{
- struct pci_dev *pdev = adapter->pdev;
-
- dev_dbg(&pdev->dev, "atl1e_restore_vlan !");
+ netdev_dbg(adapter->netdev, "%s\n", __func__);
atl1e_vlan_rx_register(adapter->netdev, adapter->vlgrp);
}
/*
@@ -391,7 +384,7 @@ static int atl1e_change_mtu(struct net_device *netdev, int new_mtu)
if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
- dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
+ netdev_warn(adapter->netdev, "invalid MTU setting\n");
return -EINVAL;
}
/* set MTU */
@@ -438,7 +431,6 @@ static int atl1e_mii_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
struct mii_ioctl_data *data = if_mii(ifr);
unsigned long flags;
int retval = 0;
@@ -466,8 +458,8 @@ static int atl1e_mii_ioctl(struct net_device *netdev,
goto out;
}
- dev_dbg(&pdev->dev, "<atl1e_mii_ioctl> write %x %x",
- data->reg_num, data->val_in);
+ netdev_dbg(adapter->netdev, "<atl1e_mii_ioctl> write %x %x\n",
+ data->reg_num, data->val_in);
if (atl1e_write_phy_reg(&adapter->hw,
data->reg_num, data->val_in)) {
retval = -EIO;
@@ -602,7 +594,7 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
hw->dmaw_dly_cnt = 4;
if (atl1e_alloc_queues(adapter)) {
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ netdev_err(adapter->netdev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -800,8 +792,8 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
adapter->ring_size, &adapter->ring_dma);
if (adapter->ring_vir_addr == NULL) {
- dev_err(&pdev->dev, "pci_alloc_consistent failed, "
- "size = D%d", size);
+ netdev_err(adapter->netdev,
+ "pci_alloc_consistent failed, size = D%d\n", size);
return -ENOMEM;
}
@@ -817,7 +809,8 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL);
if (tx_ring->tx_buffer == NULL) {
- dev_err(&pdev->dev, "kzalloc failed , size = D%d", size);
+ netdev_err(adapter->netdev, "kzalloc failed, size = D%d\n",
+ size);
err = -ENOMEM;
goto failed;
}
@@ -852,8 +845,8 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
}
if (unlikely(offset > adapter->ring_size)) {
- dev_err(&pdev->dev, "offset(%d) > ring size(%d) !!\n",
- offset, adapter->ring_size);
+ netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
+ offset, adapter->ring_size);
err = -1;
goto failed;
}
@@ -1077,7 +1070,6 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
static int atl1e_configure(struct atl1e_adapter *adapter)
{
struct atl1e_hw *hw = &adapter->hw;
- struct pci_dev *pdev = adapter->pdev;
u32 intr_status_data = 0;
@@ -1130,8 +1122,8 @@ static int atl1e_configure(struct atl1e_adapter *adapter)
intr_status_data = AT_READ_REG(hw, REG_ISR);
if (unlikely((intr_status_data & ISR_PHY_LINKDOWN) != 0)) {
- dev_err(&pdev->dev, "atl1e_configure failed,"
- "PCIE phy link down\n");
+ netdev_err(adapter->netdev,
+ "atl1e_configure failed, PCIE phy link down\n");
return -1;
}
@@ -1262,7 +1254,6 @@ static irqreturn_t atl1e_intr(int irq, void *data)
{
struct net_device *netdev = data;
struct atl1e_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
struct atl1e_hw *hw = &adapter->hw;
int max_ints = AT_MAX_INT_WORK;
int handled = IRQ_NONE;
@@ -1285,8 +1276,8 @@ static irqreturn_t atl1e_intr(int irq, void *data)
handled = IRQ_HANDLED;
/* check if PCIE PHY Link down */
if (status & ISR_PHY_LINKDOWN) {
- dev_err(&pdev->dev,
- "pcie phy linkdown %x\n", status);
+ netdev_err(adapter->netdev,
+ "pcie phy linkdown %x\n", status);
if (netif_running(adapter->netdev)) {
/* reset MAC */
atl1e_irq_reset(adapter);
@@ -1297,9 +1288,9 @@ static irqreturn_t atl1e_intr(int irq, void *data)
/* check if DMA read/write error */
if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
- dev_err(&pdev->dev,
- "PCIE DMA RW error (status = 0x%x)\n",
- status);
+ netdev_err(adapter->netdev,
+ "PCIE DMA RW error (status = 0x%x)\n",
+ status);
atl1e_irq_reset(adapter);
schedule_work(&adapter->reset_task);
break;
@@ -1382,7 +1373,6 @@ static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter,
static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
int *work_done, int work_to_do)
{
- struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *)
&adapter->rx_ring;
@@ -1404,11 +1394,10 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
rx_page->read_offset);
/* check sequence number */
if (prrs->seq_num != rx_page_desc[que].rx_nxseq) {
- dev_err(&pdev->dev,
- "rx sequence number"
- " error (rx=%d) (expect=%d)\n",
- prrs->seq_num,
- rx_page_desc[que].rx_nxseq);
+ netdev_err(netdev,
+ "rx sequence number error (rx=%d) (expect=%d)\n",
+ prrs->seq_num,
+ rx_page_desc[que].rx_nxseq);
rx_page_desc[que].rx_nxseq++;
/* just for debug use */
AT_WRITE_REG(&adapter->hw, REG_DEBUG_DATA0,
@@ -1424,9 +1413,9 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
RRS_ERR_DRIBBLE | RRS_ERR_CODE |
RRS_ERR_TRUNC)) {
/* hardware error, discard this packet*/
- dev_err(&pdev->dev,
- "rx packet desc error %x\n",
- *((u32 *)prrs + 1));
+ netdev_err(netdev,
+ "rx packet desc error %x\n",
+ *((u32 *)prrs + 1));
goto skip_pkt;
}
}
@@ -1435,8 +1424,8 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
RRS_PKT_SIZE_MASK) - 4; /* CRC */
skb = netdev_alloc_skb_ip_align(netdev, packet_size);
if (skb == NULL) {
- dev_warn(&pdev->dev, "%s: Memory squeeze,"
- "deferring packet.\n", netdev->name);
+ netdev_warn(netdev,
+ "Memory squeeze, deferring packet\n");
goto skip_pkt;
}
skb->dev = netdev;
@@ -1450,9 +1439,9 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
u16 vlan_tag = (prrs->vtag >> 4) |
((prrs->vtag & 7) << 13) |
((prrs->vtag & 8) << 9);
- dev_dbg(&pdev->dev,
- "RXD VLAN TAG<RRD>=0x%04x\n",
- prrs->vtag);
+ netdev_dbg(netdev,
+ "RXD VLAN TAG<RRD>=0x%04x\n",
+ prrs->vtag);
vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
vlan_tag);
} else {
@@ -1500,7 +1489,6 @@ static int atl1e_clean(struct napi_struct *napi, int budget)
{
struct atl1e_adapter *adapter =
container_of(napi, struct atl1e_adapter, napi);
- struct pci_dev *pdev = adapter->pdev;
u32 imr_data;
int work_done = 0;
@@ -1519,8 +1507,8 @@ quit_polling:
/* test debug */
if (test_bit(__AT_DOWN, &adapter->flags)) {
atomic_dec(&adapter->irq_sem);
- dev_err(&pdev->dev,
- "atl1e_clean is called when AT_DOWN\n");
+ netdev_err(adapter->netdev,
+ "atl1e_clean is called when AT_DOWN\n");
}
/* reenable RX intr */
/*atl1e_irq_enable(adapter); */
@@ -1618,7 +1606,6 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
static int atl1e_tso_csum(struct atl1e_adapter *adapter,
struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
{
- struct pci_dev *pdev = adapter->pdev;
u8 hdr_len;
u32 real_len;
unsigned short offload_type;
@@ -1642,8 +1629,8 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
- dev_warn(&pdev->dev,
- "IPV4 tso with zero data??\n");
+ netdev_warn(adapter->netdev,
+ "IPV4 tso with zero data??\n");
goto check_sum;
} else {
ip_hdr(skb)->check = 0;
@@ -1672,8 +1659,8 @@ check_sum:
cso = skb_transport_offset(skb);
if (unlikely(cso & 0x1)) {
- dev_err(&adapter->pdev->dev,
- "pay load offset should not ant event number\n");
+ netdev_err(adapter->netdev,
+ "payload offset should not ant event number\n");
return -1;
} else {
css = cso + skb->csum_offset;
@@ -1886,8 +1873,8 @@ static int atl1e_request_irq(struct atl1e_adapter *adapter)
adapter->have_msi = true;
err = pci_enable_msi(adapter->pdev);
if (err) {
- dev_dbg(&pdev->dev,
- "Unable to allocate MSI interrupt Error: %d\n", err);
+ netdev_dbg(adapter->netdev,
+ "Unable to allocate MSI interrupt Error: %d\n", err);
adapter->have_msi = false;
} else
netdev->irq = pdev->irq;
@@ -1898,13 +1885,13 @@ static int atl1e_request_irq(struct atl1e_adapter *adapter)
err = request_irq(adapter->pdev->irq, atl1e_intr, flags,
netdev->name, netdev);
if (err) {
- dev_dbg(&pdev->dev,
- "Unable to allocate interrupt Error: %d\n", err);
+ netdev_dbg(adapter->netdev,
+ "Unable to allocate interrupt Error: %d\n", err);
if (adapter->have_msi)
pci_disable_msi(adapter->pdev);
return err;
}
- dev_dbg(&pdev->dev, "atl1e_request_irq OK\n");
+ netdev_dbg(adapter->netdev, "atl1e_request_irq OK\n");
return err;
}
@@ -2078,7 +2065,7 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
(atl1e_write_phy_reg(hw,
MII_ADVERTISE, mii_advertise_data) != 0) ||
(atl1e_phy_commit(hw)) != 0) {
- dev_dbg(&pdev->dev, "set phy register failed\n");
+ netdev_dbg(adapter->netdev, "set phy register failed\n");
goto wol_dis;
}
@@ -2100,17 +2087,14 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
}
if ((mii_bmsr_data & BMSR_LSTATUS) == 0)
- dev_dbg(&pdev->dev,
- "%s: Link may change"
- "when suspend\n",
- atl1e_driver_name);
+ netdev_dbg(adapter->netdev,
+ "Link may change when suspend\n");
}
wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
/* only link up can wake up */
if (atl1e_write_phy_reg(hw, MII_INT_CTRL, 0x400) != 0) {
- dev_dbg(&pdev->dev, "%s: read write phy "
- "register failed.\n",
- atl1e_driver_name);
+ netdev_dbg(adapter->netdev,
+ "read write phy register failed\n");
goto wol_dis;
}
}
@@ -2131,9 +2115,8 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
if (wufc & AT_WUFC_MAG)
mac_ctrl_data |= MAC_CTRL_BC_EN;
- dev_dbg(&pdev->dev,
- "%s: suspend MAC=0x%x\n",
- atl1e_driver_name, mac_ctrl_data);
+ netdev_dbg(adapter->netdev, "suspend MAC=0x%x\n",
+ mac_ctrl_data);
AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
@@ -2183,8 +2166,8 @@ static int atl1e_resume(struct pci_dev *pdev)
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "ATL1e: Cannot enable PCI"
- " device from suspend\n");
+ netdev_err(adapter->netdev,
+ "Cannot enable PCI device from suspend\n");
return err;
}
@@ -2315,7 +2298,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
err = atl1e_init_netdev(netdev, pdev);
if (err) {
- dev_err(&pdev->dev, "init netdevice failed\n");
+ netdev_err(netdev, "init netdevice failed\n");
goto err_init_netdev;
}
adapter = netdev_priv(netdev);
@@ -2326,7 +2309,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0);
if (!adapter->hw.hw_addr) {
err = -EIO;
- dev_err(&pdev->dev, "cannot map device registers\n");
+ netdev_err(netdev, "cannot map device registers\n");
goto err_ioremap;
}
netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
@@ -2356,7 +2339,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
/* setup the private structure */
err = atl1e_sw_init(adapter);
if (err) {
- dev_err(&pdev->dev, "net device private data init failed\n");
+ netdev_err(netdev, "net device private data init failed\n");
goto err_sw_init;
}
@@ -2372,22 +2355,19 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
if (atl1e_read_mac_addr(&adapter->hw) != 0) {
err = -EIO;
- dev_err(&pdev->dev, "get mac address failed\n");
+ netdev_err(netdev, "get mac address failed\n");
goto err_eeprom;
}
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
- dev_dbg(&pdev->dev, "mac address : %02x-%02x-%02x-%02x-%02x-%02x\n",
- adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
- adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
- adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
+ netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr);
INIT_WORK(&adapter->reset_task, atl1e_reset_task);
INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
err = register_netdev(netdev);
if (err) {
- dev_err(&pdev->dev, "register netdevice failed\n");
+ netdev_err(netdev, "register netdevice failed\n");
goto err_register;
}
@@ -2488,8 +2468,8 @@ static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev)
struct atl1e_adapter *adapter = netdev_priv(netdev);
if (pci_enable_device(pdev)) {
- dev_err(&pdev->dev,
- "ATL1e: Cannot re-enable PCI device after reset.\n");
+ netdev_err(adapter->netdev,
+ "Cannot re-enable PCI device after reset\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
@@ -2517,8 +2497,8 @@ static void atl1e_io_resume(struct pci_dev *pdev)
if (netif_running(netdev)) {
if (atl1e_up(adapter)) {
- dev_err(&pdev->dev,
- "ATL1e: can't bring device back up after reset\n");
+ netdev_err(adapter->netdev,
+ "can't bring device back up after reset\n");
return;
}
}
diff --git a/drivers/net/atl1e/atl1e_param.c b/drivers/net/atl1e/atl1e_param.c
index b3be59fd3fb5..0ce60b6e7ef0 100644
--- a/drivers/net/atl1e/atl1e_param.c
+++ b/drivers/net/atl1e/atl1e_param.c
@@ -116,7 +116,7 @@ struct atl1e_option {
} arg;
};
-static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt, struct pci_dev *pdev)
+static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt, struct atl1e_adapter *adapter)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
@@ -127,16 +127,19 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
case enable_option:
switch (*value) {
case OPTION_ENABLED:
- dev_info(&pdev->dev, "%s Enabled\n", opt->name);
+ netdev_info(adapter->netdev,
+ "%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
- dev_info(&pdev->dev, "%s Disabled\n", opt->name);
+ netdev_info(adapter->netdev,
+ "%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- dev_info(&pdev->dev, "%s set to %i\n", opt->name, *value);
+ netdev_info(adapter->netdev, "%s set to %i\n",
+ opt->name, *value);
return 0;
}
break;
@@ -148,8 +151,8 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
- dev_info(&pdev->dev, "%s\n",
- ent->str);
+ netdev_info(adapter->netdev,
+ "%s\n", ent->str);
return 0;
}
}
@@ -159,8 +162,8 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
BUG();
}
- dev_info(&pdev->dev, "Invalid %s specified (%i) %s\n",
- opt->name, *value, opt->err);
+ netdev_info(adapter->netdev, "Invalid %s specified (%i) %s\n",
+ opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
@@ -176,11 +179,13 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
*/
void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
{
- struct pci_dev *pdev = adapter->pdev;
int bd = adapter->bd_number;
+
if (bd >= ATL1E_MAX_NIC) {
- dev_notice(&pdev->dev, "no configuration for board #%i\n", bd);
- dev_notice(&pdev->dev, "Using defaults for all values\n");
+ netdev_notice(adapter->netdev,
+ "no configuration for board #%i\n", bd);
+ netdev_notice(adapter->netdev,
+ "Using defaults for all values\n");
}
{ /* Transmit Ring Size */
@@ -196,7 +201,7 @@ void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
int val;
if (num_tx_desc_cnt > bd) {
val = tx_desc_cnt[bd];
- atl1e_validate_option(&val, &opt, pdev);
+ atl1e_validate_option(&val, &opt, adapter);
adapter->tx_ring.count = (u16) val & 0xFFFC;
} else
adapter->tx_ring.count = (u16)opt.def;
@@ -215,7 +220,7 @@ void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
int val;
if (num_rx_mem_size > bd) {
val = rx_mem_size[bd];
- atl1e_validate_option(&val, &opt, pdev);
+ atl1e_validate_option(&val, &opt, adapter);
adapter->rx_ring.page_size = (u32)val * 1024;
} else {
adapter->rx_ring.page_size = (u32)opt.def * 1024;
@@ -235,7 +240,7 @@ void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
int val;
if (num_int_mod_timer > bd) {
val = int_mod_timer[bd];
- atl1e_validate_option(&val, &opt, pdev);
+ atl1e_validate_option(&val, &opt, adapter);
adapter->hw.imt = (u16) val;
} else
adapter->hw.imt = (u16)(opt.def);
@@ -254,7 +259,7 @@ void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
int val;
if (num_media_type > bd) {
val = media_type[bd];
- atl1e_validate_option(&val, &opt, pdev);
+ atl1e_validate_option(&val, &opt, adapter);
adapter->hw.media_type = (u16) val;
} else
adapter->hw.media_type = (u16)(opt.def);
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index b6cf3263127c..9ba547069db3 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -232,7 +232,7 @@ static void __devinit atl1_check_options(struct atl1_adapter *adapter)
/*
* atl1_pci_tbl - PCI Device ID Table
*/
-static const struct pci_device_id atl1_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(atl1_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
/* required last entry */
{0,}
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index ec52529394ad..40cf9e5cb9e2 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -63,7 +63,7 @@ MODULE_VERSION(ATL2_DRV_VERSION);
/*
* atl2_pci_tbl - PCI Device ID Table
*/
-static struct pci_device_id atl2_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(atl2_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)},
/* required last entry */
{0,}
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 2f8261c9614a..a841feb5df20 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -861,7 +861,7 @@ static void set_rx_mode_8002(struct net_device *dev)
struct net_local *lp = netdev_priv(dev);
long ioaddr = dev->base_addr;
- if (dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC)))
+ if (!netdev_mc_empty(dev) || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC)))
lp->addr_mode = CMR2h_PROMISC;
else
lp->addr_mode = CMR2h_Normal;
@@ -877,7 +877,8 @@ static void set_rx_mode_8012(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
new_mode = CMR2h_PROMISC;
- } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > 1000) ||
+ (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
new_mode = CMR2h_Normal;
@@ -885,7 +886,7 @@ static void set_rx_mode_8012(struct net_device *dev)
struct dev_mc_list *mclist;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next)
{
int filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 6bac04603a88..9337d023919c 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -1013,7 +1013,7 @@ static void au1000_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
aup->mac->control |= MAC_PROMISCUOUS;
} else if ((dev->flags & IFF_ALLMULTI) ||
- dev->mc_count > MULTICAST_FILTER_LIMIT) {
+ netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
aup->mac->control |= MAC_PASS_ALL_MULTI;
aup->mac->control &= ~MAC_PROMISCUOUS;
printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
@@ -1023,7 +1023,7 @@ static void au1000_multicast_list(struct net_device *dev)
u32 mc_filter[2]; /* Multicast hash filter */
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
(long *)mc_filter);
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 4869adb69586..9091c6574b1c 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -102,7 +102,7 @@ MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
#ifdef CONFIG_B44_PCI
-static const struct pci_device_id b44_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
@@ -1691,7 +1691,7 @@ static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
struct dev_mc_list *mclist;
int i, num_ents;
- num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
+ num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
mclist = dev->mc_list;
for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
__b44_cam_write(bp, mclist->dmi_addr, i + 1);
@@ -1716,7 +1716,7 @@ static void __b44_set_rx_mode(struct net_device *dev)
__b44_set_mac_addr(bp);
if ((dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > B44_MCAST_TABLE_SIZE))
+ (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
val |= RXCONFIG_ALLMULTI;
else
i = __b44_load_mcast(bp, dev);
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 0bd47d32ec42..0927ffa0d753 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -619,7 +619,7 @@ static void bcm_enet_set_multicast_list(struct net_device *dev)
/* only 3 perfect match registers left, first one is used for
* own mac address */
- if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 3)
+ if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
val |= ENET_RXCFG_ALLMCAST_MASK;
else
val &= ~ENET_RXCFG_ALLMCAST_MASK;
@@ -632,7 +632,7 @@ static void bcm_enet_set_multicast_list(struct net_device *dev)
}
for (i = 0, mc_list = dev->mc_list;
- (mc_list != NULL) && (i < dev->mc_count) && (i < 3);
+ (mc_list != NULL) && (i < netdev_mc_count(dev)) && (i < 3);
i++, mc_list = mc_list->next) {
u8 *dmi_addr;
u32 tmp;
diff --git a/drivers/net/benet/Kconfig b/drivers/net/benet/Kconfig
index fdb6e81a4374..1a41a49bb619 100644
--- a/drivers/net/benet/Kconfig
+++ b/drivers/net/benet/Kconfig
@@ -1,6 +1,6 @@
config BE2NET
- tristate "ServerEngines' 10Gbps NIC - BladeEngine 2"
+ tristate "ServerEngines' 10Gbps NIC - BladeEngine"
depends on PCI && INET
help
This driver implements the NIC functionality for ServerEngines'
- 10Gbps network adapter - BladeEngine 2.
+ 10Gbps network adapter - BladeEngine.
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 5bc74590c73e..5038c16bfe9b 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -38,22 +38,20 @@
#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
#define OC_NAME "Emulex OneConnect 10Gbps NIC"
#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)"
-#define DRV_DESC BE_NAME "Driver"
+#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
#define BE_VENDOR_ID 0x19a2
#define BE_DEVICE_ID1 0x211
#define BE_DEVICE_ID2 0x221
#define OC_DEVICE_ID1 0x700
-#define OC_DEVICE_ID2 0x701
-#define OC_DEVICE_ID3 0x710
+#define OC_DEVICE_ID2 0x710
static inline char *nic_name(struct pci_dev *pdev)
{
switch (pdev->device) {
case OC_DEVICE_ID1:
- case OC_DEVICE_ID2:
return OC_NAME;
- case OC_DEVICE_ID3:
+ case OC_DEVICE_ID2:
return OC_NAME1;
case BE_DEVICE_ID2:
return BE3_NAME;
@@ -252,7 +250,8 @@ struct be_adapter {
bool rx_post_starved; /* Zero rx frags have been posted to BE */
struct vlan_group *vlan_grp;
- u16 num_vlans;
+ u16 vlans_added;
+ u16 max_vlans; /* Number of vlans supported */
u8 vlan_tag[VLAN_GROUP_ARRAY_LEN];
struct be_dma_mem mc_cmd_mem;
@@ -266,6 +265,7 @@ struct be_adapter {
u32 if_handle; /* Used to configure filtering */
u32 pmac_id; /* MAC addr handle used by BE card */
+ bool eeh_err;
bool link_up;
u32 port_num;
bool promiscuous;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 006cb2efcd22..477f82bc647e 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -167,7 +167,14 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
u32 ready;
do {
- ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
+ ready = ioread32(db);
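+	/* all-ones from an MMIO read usually means the PCI device has dropped off the bus */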
+ if (ready == 0xffffffff) {
+ dev_err(&adapter->pdev->dev,
+ "pci slot disconnected\n");
+ return -1;
+ }
+
+ ready &= MPU_MAILBOX_DB_RDY_MASK;
if (ready)
break;
@@ -198,6 +205,11 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
struct be_mcc_mailbox *mbox = mbox_mem->va;
struct be_mcc_compl *compl = &mbox->compl;
+ /* wait for ready to be set */
+ status = be_mbox_db_ready_wait(adapter, db);
+ if (status != 0)
+ return status;
+
val |= MPU_MAILBOX_DB_HI_MASK;
/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -397,6 +409,9 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
u8 *wrb;
int status;
+ if (adapter->eeh_err)
+ return -EIO;
+
spin_lock(&adapter->mbox_lock);
wrb = (u8 *)wrb_from_mbox(adapter);
@@ -769,6 +784,9 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
u8 subsys = 0, opcode = 0;
int status;
+ if (adapter->eeh_err)
+ return -EIO;
+
spin_lock(&adapter->mbox_lock);
wrb = wrb_from_mbox(adapter);
@@ -857,6 +875,9 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
struct be_cmd_req_if_destroy *req;
int status;
+ if (adapter->eeh_err)
+ return -EIO;
+
spin_lock(&adapter->mbox_lock);
wrb = wrb_from_mbox(adapter);
@@ -1375,7 +1396,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 flash_type, u32 flash_opcode, u32 buf_size)
{
struct be_mcc_wrb *wrb;
- struct be_cmd_write_flashrom *req = cmd->va;
+ struct be_cmd_write_flashrom *req;
struct be_sge *sge;
int status;
@@ -1409,7 +1430,8 @@ err:
return status;
}
-int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc)
+int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
+ int offset)
{
struct be_mcc_wrb *wrb;
struct be_cmd_write_flashrom *req;
@@ -1430,9 +1452,9 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
- req->params.op_type = cpu_to_le32(FLASHROM_TYPE_REDBOOT);
+ req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
- req->params.offset = 0x3FFFC;
+ req->params.offset = offset;
req->params.data_buf_size = 0x4;
status = be_mcc_notify_wait(adapter);
@@ -1608,3 +1630,33 @@ err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
+
+int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd)
+{
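+	/* read the adapter SEEPROM into the caller's non-embedded DMA buffer via the MCC queue */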
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_seeprom_read *req;
+ struct be_sge *sge;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ req = nonemb_cmd->va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
+ OPCODE_COMMON_SEEPROM_READ);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
+
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ status = be_mcc_notify_wait(adapter);
+
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 13b33c841083..7297b5a47657 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -124,6 +124,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_CQ_CREATE 12
#define OPCODE_COMMON_EQ_CREATE 13
#define OPCODE_COMMON_MCC_CREATE 21
+#define OPCODE_COMMON_SEEPROM_READ 30
#define OPCODE_COMMON_NTWK_RX_FILTER 34
#define OPCODE_COMMON_GET_FW_VERSION 35
#define OPCODE_COMMON_SET_FLOW_CONTROL 36
@@ -855,6 +856,19 @@ struct be_cmd_resp_ddrdma_test {
u8 rcv_buff[4096];
};
+/*********************** SEEPROM Read ***********************/
+
+#define BE_READ_SEEPROM_LEN 1024
+struct be_cmd_req_seeprom_read {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd0[BE_READ_SEEPROM_LEN];
+};
+
+struct be_cmd_resp_seeprom_read {
+ struct be_cmd_req_hdr hdr;
+ u8 seeprom_data[BE_READ_SEEPROM_LEN];
+};
+
extern int be_pci_fnum_get(struct be_adapter *adapter);
extern int be_cmd_POST(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -917,7 +931,8 @@ extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
extern int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_dma_mem *cmd, u32 flash_oper,
u32 flash_opcode, u32 buf_size);
-extern int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc);
+int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
+ int offset);
extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
struct be_dma_mem *nonemb_cmd);
extern int be_cmd_fw_init(struct be_adapter *adapter);
@@ -927,5 +942,8 @@ extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
u32 num_pkts, u64 pattern);
extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
u32 byte_cnt, struct be_dma_mem *cmd);
+extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd);
extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
u8 loopback_type, u8 enable);
+
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 5d001c4deac1..dcc7f37b5428 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -112,6 +112,8 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
"PHY Loopback test",
"External Loopback test",
"DDR DMA test"
+ "Link test"
};
#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
@@ -529,6 +530,9 @@ static void
be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ bool link_up;
+ u8 mac_speed = 0;
+ u16 qos_link_speed = 0;
memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
@@ -545,12 +549,20 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
&data[2]) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
}
+ }
- data[3] = be_test_ddr_dma(adapter);
- if (data[3] != 0)
- test->flags |= ETH_TEST_FL_FAILED;
+ if (be_test_ddr_dma(adapter) != 0) {
+ data[3] = 1;
+ test->flags |= ETH_TEST_FL_FAILED;
}
+ if (be_cmd_link_status_query(adapter, &link_up, &mac_speed,
+ &qos_link_speed) != 0) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ data[4] = -1;
+ } else if (mac_speed) {
+ data[4] = 1;
+ }
}
static int
@@ -567,12 +579,57 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
return be_load_fw(adapter, file_name);
}
+static int
+be_get_eeprom_len(struct net_device *netdev)
+{
+ return BE_READ_SEEPROM_LEN;
+}
+
+static int
+be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ uint8_t *data)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_dma_mem eeprom_cmd;
+ struct be_cmd_resp_seeprom_read *resp;
+ int status;
+
+ if (!eeprom->len)
+ return -EINVAL;
+
+ eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
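+	/* ethtool magic: PCI vendor id in the low 16 bits, device id in the high 16 bits */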
+
+ memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
+ eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
+ eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
+ &eeprom_cmd.dma);
+
+ if (!eeprom_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure. Could not read eeprom\n");
+ return -ENOMEM;
+ }
+
+ status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
+
+ if (!status) {
+ resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
+ memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
+ }
+ pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
+ eeprom_cmd.dma);
+
+ return status;
+}
+
const struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
.get_wol = be_get_wol,
.set_wol = be_set_wol,
.get_link = ethtool_op_get_link,
+ .get_eeprom_len = be_get_eeprom_len,
+ .get_eeprom = be_read_eeprom,
.get_coalesce = be_get_coalesce,
.set_coalesce = be_set_coalesce,
.get_ringparam = be_get_ringparam,
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index e2b3beffd49d..bb2ae6f924db 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -99,6 +99,63 @@
/* Number of entries posted */
#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
+/* Flashrom related descriptors */
+#define IMAGE_TYPE_FIRMWARE 160
+#define IMAGE_TYPE_BOOTCODE 224
+#define IMAGE_TYPE_OPTIONROM 32
+
+#define NUM_FLASHDIR_ENTRIES 32
+
+#define IMG_TYPE_ISCSI_ACTIVE 0
+#define IMG_TYPE_REDBOOT 1
+#define IMG_TYPE_BIOS 2
+#define IMG_TYPE_PXE_BIOS 3
+#define IMG_TYPE_FCOE_BIOS 8
+#define IMG_TYPE_ISCSI_BACKUP 9
+#define IMG_TYPE_FCOE_FW_ACTIVE 10
+#define IMG_TYPE_FCOE_FW_BACKUP 11
+#define IMG_TYPE_NCSI_BITFILE 13
+#define IMG_TYPE_NCSI_8051 14
+
+#define FLASHROM_OPER_FLASH 1
+#define FLASHROM_OPER_SAVE 2
+#define FLASHROM_OPER_REPORT 4
+
+#define FLASH_IMAGE_MAX_SIZE_g2 (1310720) /* Max firmware image sz */
+#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 (262144) /* Max OPTION ROM img sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 (262144) /* Max Redboot image sz */
+#define FLASH_IMAGE_MAX_SIZE_g3 (2097152) /* Max fw image size */
+#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 (524288) /* Max OPTION ROM img sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 (1048576) /* Max Redboot image sz */
+
+#define FLASH_NCSI_MAGIC (0x16032009)
+#define FLASH_NCSI_DISABLED (0)
+#define FLASH_NCSI_ENABLED (1)
+
+#define FLASH_NCSI_BITFILE_HDR_OFFSET (0x600000)
+
+/* Offsets for components on Flash. */
+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 (1048576)
+#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 (2359296)
+#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 (3670016)
+#define FLASH_FCoE_BACKUP_IMAGE_START_g2 (4980736)
+#define FLASH_iSCSI_BIOS_START_g2 (7340032)
+#define FLASH_PXE_BIOS_START_g2 (7864320)
+#define FLASH_FCoE_BIOS_START_g2 (524288)
+#define FLASH_REDBOOT_START_g2 (0)
+
+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 (2097152)
+#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 (4194304)
+#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 (6291456)
+#define FLASH_FCoE_BACKUP_IMAGE_START_g3 (8388608)
+#define FLASH_iSCSI_BIOS_START_g3 (12582912)
+#define FLASH_PXE_BIOS_START_g3 (13107200)
+#define FLASH_FCoE_BIOS_START_g3 (13631488)
+#define FLASH_REDBOOT_START_g3 (262144)
+
+
+
+
/*
* BE descriptors: host memory data structures whose formats
* are hardwired in BE silicon.
@@ -107,6 +164,7 @@
#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
#define EQ_ENTRY_RES_ID_SHIFT 16
+
struct be_eq_entry {
u32 evt;
};
@@ -221,41 +279,6 @@ struct be_eth_rx_compl {
u32 dw[4];
};
-/* Flashrom related descriptors */
-#define IMAGE_TYPE_FIRMWARE 160
-#define IMAGE_TYPE_BOOTCODE 224
-#define IMAGE_TYPE_OPTIONROM 32
-
-#define NUM_FLASHDIR_ENTRIES 32
-
-#define FLASHROM_TYPE_ISCSI_ACTIVE 0
-#define FLASHROM_TYPE_REDBOOT 1
-#define FLASHROM_TYPE_BIOS 2
-#define FLASHROM_TYPE_PXE_BIOS 3
-#define FLASHROM_TYPE_FCOE_BIOS 8
-#define FLASHROM_TYPE_ISCSI_BACKUP 9
-#define FLASHROM_TYPE_FCOE_FW_ACTIVE 10
-#define FLASHROM_TYPE_FCOE_FW_BACKUP 11
-
-#define FLASHROM_OPER_FLASH 1
-#define FLASHROM_OPER_SAVE 2
-#define FLASHROM_OPER_REPORT 4
-
-#define FLASH_IMAGE_MAX_SIZE (1310720) /* Max firmware image size */
-#define FLASH_BIOS_IMAGE_MAX_SIZE (262144) /* Max OPTION ROM image sz */
-#define FLASH_REDBOOT_IMAGE_MAX_SIZE (262144) /* Max redboot image sz */
-
-/* Offsets for components on Flash. */
-#define FLASH_iSCSI_PRIMARY_IMAGE_START (1048576)
-#define FLASH_iSCSI_BACKUP_IMAGE_START (2359296)
-#define FLASH_FCoE_PRIMARY_IMAGE_START (3670016)
-#define FLASH_FCoE_BACKUP_IMAGE_START (4980736)
-#define FLASH_iSCSI_BIOS_START (7340032)
-#define FLASH_PXE_BIOS_START (7864320)
-#define FLASH_FCoE_BIOS_START (524288)
-#define FLASH_REDBOOT_START (32768)
-#define FLASH_REDBOOT_ISM_START (0)
-
struct controller_id {
u32 vendor;
u32 device;
@@ -263,7 +286,20 @@ struct controller_id {
u32 subdevice;
};
-struct flash_file_hdr {
+struct flash_comp {
+ unsigned long offset;
+ int optype;
+ int size;
+};
+
+struct image_hdr {
+ u32 imageid;
+ u32 imageoffset;
+ u32 imagelength;
+ u32 image_checksum;
+ u8 image_version[32];
+};
+struct flash_file_hdr_g2 {
u8 sign[32];
u32 cksum;
u32 antidote;
@@ -275,6 +311,17 @@ struct flash_file_hdr {
u8 build[24];
};
+struct flash_file_hdr_g3 {
+ u8 sign[52];
+ u8 ufi_version[4];
+ u32 file_len;
+ u32 cksum;
+ u32 antidote;
+ u32 num_imgs;
+ u8 build[24];
+ u8 rsvd[32];
+};
+
struct flash_section_hdr {
u32 format_rev;
u32 cksum;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 626b76c0ebc7..cbfaa3feb7c4 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -34,7 +34,6 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
- { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -69,6 +68,9 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
u32 reg = ioread32(addr);
u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ if (adapter->eeh_err)
+ return;
+
if (!enabled && enable)
reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
else if (enabled && !enable)
@@ -100,6 +102,10 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
{
u32 val = 0;
val |= qid & DB_EQ_RING_ID_MASK;
+
+ if (adapter->eeh_err)
+ return;
+
if (arm)
val |= 1 << DB_EQ_REARM_SHIFT;
if (clear_int)
@@ -113,6 +119,10 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
u32 val = 0;
val |= qid & DB_CQ_RING_ID_MASK;
+
+ if (adapter->eeh_err)
+ return;
+
if (arm)
val |= 1 << DB_CQ_REARM_SHIFT;
val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
@@ -474,10 +484,12 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
struct be_adapter *adapter = netdev_priv(netdev);
if (new_mtu < BE_MIN_MTU ||
- new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
+ new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN))) {
dev_info(&adapter->pdev->dev,
"MTU must be between %d and %d bytes\n",
- BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
+ BE_MIN_MTU,
+ (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
return -EINVAL;
}
dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
@@ -487,17 +499,16 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
}
/*
- * if there are BE_NUM_VLANS_SUPPORTED or lesser number of VLANS configured,
- * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
- * set the BE in promiscuous VLAN mode.
+ * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
+ * If the user configures more, place BE in vlan promiscuous mode.
*/
static int be_vid_config(struct be_adapter *adapter)
{
u16 vtag[BE_NUM_VLANS_SUPPORTED];
u16 ntags = 0, i;
- int status;
+ int status = 0;
- if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
+ if (adapter->vlans_added <= adapter->max_vlans) {
/* Construct VLAN Table to give to HW */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
if (adapter->vlan_tag[i]) {
@@ -531,21 +542,21 @@ static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
- adapter->num_vlans++;
adapter->vlan_tag[vid] = 1;
-
- be_vid_config(adapter);
+ adapter->vlans_added++;
+ if (adapter->vlans_added <= (adapter->max_vlans + 1))
+ be_vid_config(adapter);
}
static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
- adapter->num_vlans--;
adapter->vlan_tag[vid] = 0;
-
vlan_group_set_device(adapter->vlan_grp, vid, NULL);
- be_vid_config(adapter);
+ adapter->vlans_added--;
+ if (adapter->vlans_added <= adapter->max_vlans)
+ be_vid_config(adapter);
}
static void be_set_multicast_list(struct net_device *netdev)
@@ -565,14 +576,15 @@ static void be_set_multicast_list(struct net_device *netdev)
}
/* Enable multicast promisc if num configured exceeds what we support */
- if (netdev->flags & IFF_ALLMULTI || netdev->mc_count > BE_MAX_MC) {
+ if (netdev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(netdev) > BE_MAX_MC) {
be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0,
&adapter->mc_cmd_mem);
goto done;
}
be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
- netdev->mc_count, &adapter->mc_cmd_mem);
+ netdev_mc_count(netdev), &adapter->mc_cmd_mem);
done:
return;
}
@@ -634,9 +646,11 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
BUG_ON(!rx_page_info->page);
- if (rx_page_info->last_page_user)
+ if (rx_page_info->last_page_user) {
pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
adapter->big_page_size, PCI_DMA_FROMDEVICE);
+ rx_page_info->last_page_user = false;
+ }
atomic_dec(&rxq->used);
return rx_page_info;
@@ -704,7 +718,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
skb->data_len = curr_frag_len - hdr_len;
skb->tail += hdr_len;
}
- memset(page_info, 0, sizeof(*page_info));
+ page_info->page = NULL;
if (pktsize <= rx_frag_size) {
BUG_ON(num_rcvd != 1);
@@ -737,7 +751,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
skb->len += curr_frag_len;
skb->data_len += curr_frag_len;
- memset(page_info, 0, sizeof(*page_info));
+ page_info->page = NULL;
}
BUG_ON(j > MAX_SKB_FRAGS);
@@ -782,7 +796,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb->dev = adapter->netdev;
if (vlanf) {
- if (!adapter->vlan_grp || adapter->num_vlans == 0) {
+ if (!adapter->vlan_grp || adapter->vlans_added == 0) {
kfree_skb(skb);
return;
}
@@ -862,7 +876,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
vid = be16_to_cpu(vid);
- if (!adapter->vlan_grp || adapter->num_vlans == 0)
+ if (!adapter->vlan_grp || adapter->vlans_added == 0)
return;
vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
@@ -1798,15 +1812,19 @@ char flash_cookie[2][16] = {"*** SE FLAS",
"H DIRECTORY *** "};
static bool be_flash_redboot(struct be_adapter *adapter,
- const u8 *p)
+ const u8 *p, u32 img_start, int image_size,
+ int hdr_size)
{
u32 crc_offset;
u8 flashed_crc[4];
int status;
- crc_offset = FLASH_REDBOOT_START + FLASH_REDBOOT_IMAGE_MAX_SIZE - 4
- + sizeof(struct flash_file_hdr) - 32*1024;
+
+ crc_offset = hdr_size + img_start + image_size - 4;
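+	/* the component's CRC lives in its last 4 bytes, both in the UFI image and on flash */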
+
p += crc_offset;
- status = be_cmd_get_flash_crc(adapter, flashed_crc);
+
+ status = be_cmd_get_flash_crc(adapter, flashed_crc,
+ (img_start + image_size - 4));
if (status) {
dev_err(&adapter->pdev->dev,
"could not get crc from flash, not flashing redboot\n");
@@ -1818,102 +1836,124 @@ static bool be_flash_redboot(struct be_adapter *adapter,
return false;
else
return true;
-
}
-static int be_flash_image(struct be_adapter *adapter,
+static int be_flash_data(struct be_adapter *adapter,
const struct firmware *fw,
- struct be_dma_mem *flash_cmd, u32 flash_type)
+ struct be_dma_mem *flash_cmd, int num_of_images)
+
{
- int status;
- u32 flash_op, image_offset = 0, total_bytes, image_size = 0;
+ int status = 0, i, filehdr_size = 0;
+ u32 total_bytes = 0, flash_op;
int num_bytes;
const u8 *p = fw->data;
struct be_cmd_write_flashrom *req = flash_cmd->va;
-
- switch (flash_type) {
- case FLASHROM_TYPE_ISCSI_ACTIVE:
- image_offset = FLASH_iSCSI_PRIMARY_IMAGE_START;
- image_size = FLASH_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_ISCSI_BACKUP:
- image_offset = FLASH_iSCSI_BACKUP_IMAGE_START;
- image_size = FLASH_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_FCOE_FW_ACTIVE:
- image_offset = FLASH_FCoE_PRIMARY_IMAGE_START;
- image_size = FLASH_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_FCOE_FW_BACKUP:
- image_offset = FLASH_FCoE_BACKUP_IMAGE_START;
- image_size = FLASH_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_BIOS:
- image_offset = FLASH_iSCSI_BIOS_START;
- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_FCOE_BIOS:
- image_offset = FLASH_FCoE_BIOS_START;
- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_PXE_BIOS:
- image_offset = FLASH_PXE_BIOS_START;
- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_REDBOOT:
- if (!be_flash_redboot(adapter, fw->data))
- return 0;
- image_offset = FLASH_REDBOOT_ISM_START;
- image_size = FLASH_REDBOOT_IMAGE_MAX_SIZE;
- break;
- default:
- return 0;
+ struct flash_comp *pflashcomp;
+
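+	/* per-generation flash layout tables: region offset, image type and maximum size */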
+ struct flash_comp gen3_flash_types[8] = {
+ { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
+ FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
+ { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
+ { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
+ { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
+ { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g3}
+ };
+ struct flash_comp gen2_flash_types[8] = {
+ { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g2},
+ { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
+ FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
+ { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
+ { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
+ { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
+ { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g2},
+ { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g2},
+ { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g2}
+ };
+
+ if (adapter->generation == BE_GEN3) {
+ pflashcomp = gen3_flash_types;
+ filehdr_size = sizeof(struct flash_file_hdr_g3);
+ } else {
+ pflashcomp = gen2_flash_types;
+ filehdr_size = sizeof(struct flash_file_hdr_g2);
}
-
- p += sizeof(struct flash_file_hdr) + image_offset;
- if (p + image_size > fw->data + fw->size)
+ for (i = 0; i < 8; i++) {
+ if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
+ (!be_flash_redboot(adapter, fw->data,
+ pflashcomp[i].offset, pflashcomp[i].size,
+ filehdr_size)))
+ continue;
+ p = fw->data;
+ p += filehdr_size + pflashcomp[i].offset
+ + (num_of_images * sizeof(struct image_hdr));
+ if (p + pflashcomp[i].size > fw->data + fw->size)
return -1;
-
- total_bytes = image_size;
-
- while (total_bytes) {
- if (total_bytes > 32*1024)
- num_bytes = 32*1024;
- else
- num_bytes = total_bytes;
- total_bytes -= num_bytes;
-
- if (!total_bytes)
- flash_op = FLASHROM_OPER_FLASH;
- else
- flash_op = FLASHROM_OPER_SAVE;
- memcpy(req->params.data_buf, p, num_bytes);
- p += num_bytes;
- status = be_cmd_write_flashrom(adapter, flash_cmd,
- flash_type, flash_op, num_bytes);
- if (status) {
- dev_err(&adapter->pdev->dev,
- "cmd to write to flash rom failed. type/op %d/%d\n",
- flash_type, flash_op);
- return -1;
+ total_bytes = pflashcomp[i].size;
+ while (total_bytes) {
+ if (total_bytes > 32*1024)
+ num_bytes = 32*1024;
+ else
+ num_bytes = total_bytes;
+ total_bytes -= num_bytes;
+
+ if (!total_bytes)
+ flash_op = FLASHROM_OPER_FLASH;
+ else
+ flash_op = FLASHROM_OPER_SAVE;
+ memcpy(req->params.data_buf, p, num_bytes);
+ p += num_bytes;
+ status = be_cmd_write_flashrom(adapter, flash_cmd,
+ pflashcomp[i].optype, flash_op, num_bytes);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "cmd to write to flash rom failed.\n");
+ return -1;
+ }
+ yield();
}
- yield();
}
-
return 0;
}
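The component loop in be_flash_data() above locates each image inside the UFI file (file header, then one image header per image, then the component at its recorded offset) and streams it to the controller 32 KiB at a time, committing only on the final chunk. A minimal self-contained sketch of that walk, with push_chunk() standing in for the mailbox write (all names here are illustrative, not the driver's API):

#include <stddef.h>
#include <stdint.h>

#define CHUNK_SZ (32 * 1024)

enum flash_op { OP_SAVE, OP_FLASH };	/* stage intermediate chunks, commit the last one */

/* Placeholder for the command that hands one chunk to the adapter. */
static int push_chunk(const uint8_t *buf, size_t len, enum flash_op op)
{
	(void)buf; (void)len; (void)op;
	return 0;
}

/*
 * Flash one component: its data starts after the file header and all
 * per-image headers, at the offset recorded in the component table.
 */
static int flash_component(const uint8_t *fw, size_t fw_size,
			   size_t filehdr_size, size_t img_hdrs_size,
			   size_t offset, size_t size)
{
	const uint8_t *p = fw + filehdr_size + img_hdrs_size + offset;
	size_t left = size;

	if (p + size > fw + fw_size)
		return -1;		/* component would run past the file */

	while (left) {
		size_t n = left > CHUNK_SZ ? CHUNK_SZ : left;

		left -= n;
		if (push_chunk(p, n, left ? OP_SAVE : OP_FLASH))
			return -1;
		p += n;
	}
	return 0;
}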
+static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
+{
+ if (fhdr == NULL)
+ return 0;
+ if (fhdr->build[0] == '3')
+ return BE_GEN3;
+ else if (fhdr->build[0] == '2')
+ return BE_GEN2;
+ else
+ return 0;
+}
+
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
const struct firmware *fw;
- struct flash_file_hdr *fhdr;
- struct flash_section_info *fsec = NULL;
+ struct flash_file_hdr_g2 *fhdr;
+ struct flash_file_hdr_g3 *fhdr3;
+ struct image_hdr *img_hdr_ptr = NULL;
struct be_dma_mem flash_cmd;
- int status;
+ int status, i = 0;
const u8 *p;
- bool entry_found = false;
- int flash_type;
char fw_ver[FW_VER_LEN];
char fw_cfg;
@@ -1931,34 +1971,9 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
goto fw_exit;
p = fw->data;
- fhdr = (struct flash_file_hdr *) p;
- if (memcmp(fhdr->sign, FW_FILE_HDR_SIGN, strlen(FW_FILE_HDR_SIGN))) {
- dev_err(&adapter->pdev->dev,
- "Firmware(%s) load error (signature did not match)\n",
- fw_file);
- status = -1;
- goto fw_exit;
- }
-
+ fhdr = (struct flash_file_hdr_g2 *) p;
dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
- p += sizeof(struct flash_file_hdr);
- while (p < (fw->data + fw->size)) {
- fsec = (struct flash_section_info *)p;
- if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) {
- entry_found = true;
- break;
- }
- p += 32;
- }
-
- if (!entry_found) {
- status = -1;
- dev_err(&adapter->pdev->dev,
- "Flash cookie not found in firmware image\n");
- goto fw_exit;
- }
-
flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
&flash_cmd.dma);
@@ -1969,12 +1984,26 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
goto fw_exit;
}
- for (flash_type = FLASHROM_TYPE_ISCSI_ACTIVE;
- flash_type <= FLASHROM_TYPE_FCOE_FW_BACKUP; flash_type++) {
- status = be_flash_image(adapter, fw, &flash_cmd,
- flash_type);
- if (status)
- break;
+ if ((adapter->generation == BE_GEN3) &&
+ (get_ufigen_type(fhdr) == BE_GEN3)) {
+ fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
+ for (i = 0; i < fhdr3->num_imgs; i++) {
+ img_hdr_ptr = (struct image_hdr *) (fw->data +
+ (sizeof(struct flash_file_hdr_g3) +
+ i * sizeof(struct image_hdr)));
+ if (img_hdr_ptr->imageid == 1) {
+ status = be_flash_data(adapter, fw,
+ &flash_cmd, fhdr3->num_imgs);
+ }
+
+ }
+ } else if ((adapter->generation == BE_GEN2) &&
+ (get_ufigen_type(fhdr) == BE_GEN2)) {
+ status = be_flash_data(adapter, fw, &flash_cmd, 0);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "UFI and Interface are not compatible for flashing\n");
+ status = -1;
}
pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
@@ -2136,6 +2165,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
+ pci_save_state(adapter->pdev);
return 0;
free_mbox:
@@ -2222,6 +2252,11 @@ static int be_get_config(struct be_adapter *adapter)
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ if (adapter->cap & 0x400)
+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
+ else
+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
+
return 0;
}
@@ -2394,13 +2429,102 @@ static int be_resume(struct pci_dev *pdev)
return 0;
}
+static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ dev_err(&adapter->pdev->dev, "EEH error detected\n");
+
+ adapter->eeh_err = true;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ be_close(netdev);
+ rtnl_unlock();
+ }
+ be_clear(adapter);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ int status;
+
+ dev_info(&adapter->pdev->dev, "EEH reset\n");
+ adapter->eeh_err = false;
+
+ status = pci_enable_device(pdev);
+ if (status)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_set_master(pdev);
+ pci_set_power_state(pdev, 0);
+ pci_restore_state(pdev);
+
+ /* Check if card is ok and fw is ready */
+ status = be_cmd_POST(adapter);
+ if (status)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void be_eeh_resume(struct pci_dev *pdev)
+{
+ int status = 0;
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ dev_info(&adapter->pdev->dev, "EEH resume\n");
+
+ pci_save_state(pdev);
+
+ /* tell fw we're ready to fire cmds */
+ status = be_cmd_fw_init(adapter);
+ if (status)
+ goto err;
+
+ status = be_setup(adapter);
+ if (status)
+ goto err;
+
+ if (netif_running(netdev)) {
+ status = be_open(netdev);
+ if (status)
+ goto err;
+ }
+ netif_device_attach(netdev);
+ return;
+err:
+ dev_err(&adapter->pdev->dev, "EEH resume failed\n");
+ return;
+}
+
+static struct pci_error_handlers be_eeh_handlers = {
+ .error_detected = be_eeh_err_detected,
+ .slot_reset = be_eeh_reset,
+ .resume = be_eeh_resume,
+};
+
static struct pci_driver be_driver = {
.name = DRV_NAME,
.id_table = be_dev_ids,
.probe = be_probe,
.remove = be_remove,
.suspend = be_suspend,
- .resume = be_resume
+ .resume = be_resume,
+ .err_handler = &be_eeh_handlers
};
static int __init be_init_module(void)
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 0b23bc4f56c6..ef7f77113e26 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -819,7 +819,7 @@ static void bfin_mac_multicast_hash(struct net_device *dev)
emac_hashhi = emac_hashlo = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
@@ -862,7 +862,7 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
sysctl = bfin_read_EMAC_OPMODE();
sysctl |= PAM;
bfin_write_EMAC_OPMODE(sysctl);
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
/* set up multicast hash table */
sysctl = bfin_read_EMAC_OPMODE();
sysctl |= HM;
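This hunk, like many that follow, replaces direct dev->mc_count / dev->mc_list accesses with the netdev_mc_count() / netdev_mc_empty() helpers. A hedged kernel-style sketch of the same idea using the list-walking macro (written against the later netdev_hw_addr form of the iterator; the exact list type differed in older trees):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>

/* Build a CRC-based multicast hash without touching dev->mc_list directly. */
static void program_mc_hash(struct net_device *dev, u32 *hash, unsigned int bits)
{
	struct netdev_hw_addr *ha;

	if (netdev_mc_empty(dev))
		return;			/* nothing to program */

	netdev_for_each_mc_addr(ha, dev) {
		u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
		u32 bit = crc & (bits - 1);

		/* caller sizes hash[] to bits/32 words */
		hash[bit >> 5] |= 1u << (bit & 31);
	}
}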
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 9b587c344194..189fa69c2094 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -973,7 +973,7 @@ static void bmac_set_multicast(struct net_device *dev)
{
struct dev_mc_list *dmi;
struct bmac_data *bp = netdev_priv(dev);
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
unsigned short rx_cfg;
int i;
@@ -982,7 +982,7 @@ static void bmac_set_multicast(struct net_device *dev)
XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
- if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
bmac_update_hash_table_mask(dev, bp);
rx_cfg = bmac_rx_on(dev, 1, 0);
@@ -1021,7 +1021,7 @@ static void bmac_set_multicast(struct net_device *dev)
unsigned short rx_cfg;
u32 crc;
- if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
bmwrite(dev, BHASH0, 0xffff);
bmwrite(dev, BHASH1, 0xffff);
bmwrite(dev, BHASH2, 0xffff);
@@ -1039,7 +1039,7 @@ static void bmac_set_multicast(struct net_device *dev)
for(i = 0; i < 4; i++) hash_table[i] = 0;
- for(i = 0; i < dev->mc_count; i++) {
+ for(i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 65df1de447e4..5f0dda10f20d 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1,6 +1,6 @@
/* bnx2.c: Broadcom NX2 network driver.
*
- * Copyright (c) 2004-2009 Broadcom Corporation
+ * Copyright (c) 2004-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -48,7 +48,6 @@
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
-#include <linux/list.h>
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
@@ -59,13 +58,13 @@
#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "2.0.3"
-#define DRV_MODULE_RELDATE "Dec 03, 2009"
-#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j3.fw"
+#define DRV_MODULE_VERSION "2.0.8"
+#define DRV_MODULE_RELDATE "Feb 15, 2010"
+#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
-#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j3.fw"
-#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j3.fw"
-#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j3.fw"
+#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw"
+#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
+#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
#define RUN_AT(x) (jiffies + (x))
@@ -1278,7 +1277,7 @@ bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
if (lo_water >= bp->rx_ring_size)
lo_water = 0;
- hi_water = bp->rx_ring_size / 4;
+ hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);
if (hi_water <= lo_water)
lo_water = 0;
@@ -3561,7 +3560,7 @@ bnx2_set_rx_mode(struct net_device *dev)
memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
@@ -3579,14 +3578,14 @@ bnx2_set_rx_mode(struct net_device *dev)
sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
}
- if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) {
+ if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
BNX2_RPM_SORT_USER0_PROM_VLAN;
} else if (!(dev->flags & IFF_PROMISC)) {
/* Add all entries into to the match filter list */
i = 0;
- list_for_each_entry(ha, &dev->uc.list, list) {
+ netdev_for_each_uc_addr(ha, dev) {
bnx2_set_mac_addr(bp, ha->addr,
i + BNX2_START_UNICAST_ADDRESS_INDEX);
sort_mode |= (1 <<
@@ -4941,7 +4940,7 @@ bnx2_init_chip(struct bnx2 *bp)
BNX2_HC_CONFIG_COLLECT_STATS;
}
- if (bp->irq_nvecs > 1) {
+ if (bp->flags & BNX2_FLAG_USING_MSIX) {
REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
BNX2_HC_MSIX_BIT_VECTOR_VAL);
@@ -6145,6 +6144,10 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
+ /* Need to flush the previous three writes to ensure MSI-X
+ * is set up properly */
+ REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
+
for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
msix_ent[i].entry = i;
msix_ent[i].vector = 0;
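The read-back added in this hunk is the standard way to flush posted MMIO writes, so the MSI-X window registers are guaranteed to have reached the chip before the vectors are used. A hedged sketch of the pattern (regs is an assumed ioremap()'d BAR, the offsets are made up):

#include <linux/io.h>

static void flush_posted_writes_example(void __iomem *regs)
{
	writel(0x1, regs + 0x10);	/* hypothetical window register #1 */
	writel(0x2, regs + 0x14);	/* hypothetical window register #2 */
	readl(regs + 0x10);		/* any read from the same device flushes them */
}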
@@ -6227,6 +6230,8 @@ bnx2_open(struct net_device *dev)
atomic_set(&bp->intr_sem, 0);
+ memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
+
bnx2_enable_int(bp);
if (bp->flags & BNX2_FLAG_USING_MSI) {
@@ -6538,92 +6543,121 @@ bnx2_close(struct net_device *dev)
return 0;
}
-#define GET_NET_STATS64(ctr) \
+static void
+bnx2_save_stats(struct bnx2 *bp)
+{
+ u32 *hw_stats = (u32 *) bp->stats_blk;
+ u32 *temp_stats = (u32 *) bp->temp_stats_blk;
+ int i;
+
+ /* The 1st 10 counters are 64-bit counters */
+ for (i = 0; i < 20; i += 2) {
+ u32 hi;
+ u64 lo;
+
+ hi = temp_stats[i] + hw_stats[i];
+ lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
+ if (lo > 0xffffffff)
+ hi++;
+ temp_stats[i] = hi;
+ temp_stats[i + 1] = lo & 0xffffffff;
+ }
+
+ for ( ; i < sizeof(struct statistics_block) / 4; i++)
+ temp_stats[i] += hw_stats[i];
+}
+
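bnx2_save_stats() above folds the live hardware counters into a saved copy so totals survive a chip reset; the first counters are 64-bit values split into hi/lo 32-bit words, so a low-word overflow must carry into the high word. A small self-contained sketch of that arithmetic (names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Accumulate one 64-bit counter exposed as a hi/lo pair of 32-bit words. */
static void accumulate_pair(uint32_t *saved, const uint32_t *live)
{
	uint32_t hi = saved[0] + live[0];
	uint64_t lo = (uint64_t)saved[1] + live[1];

	if (lo > 0xffffffffULL)		/* low word wrapped: carry into hi */
		hi++;
	saved[0] = hi;
	saved[1] = (uint32_t)lo;
}

int main(void)
{
	uint32_t saved[2] = { 0, 0xfffffff0 };	/* running total kept so far */
	uint32_t live[2]  = { 0, 0x20 };	/* counts since the last reset */

	accumulate_pair(saved, live);
	printf("total = %llu\n",
	       ((unsigned long long)saved[0] << 32) | saved[1]);
	return 0;
}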
+#define GET_64BIT_NET_STATS64(ctr) \
(unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
(unsigned long) (ctr##_lo)
-#define GET_NET_STATS32(ctr) \
+#define GET_64BIT_NET_STATS32(ctr) \
(ctr##_lo)
#if (BITS_PER_LONG == 64)
-#define GET_NET_STATS GET_NET_STATS64
+#define GET_64BIT_NET_STATS(ctr) \
+ GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
+ GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
#else
-#define GET_NET_STATS GET_NET_STATS32
+#define GET_64BIT_NET_STATS(ctr) \
+ GET_64BIT_NET_STATS32(bp->stats_blk->ctr) + \
+ GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
#endif
+#define GET_32BIT_NET_STATS(ctr) \
+ (unsigned long) (bp->stats_blk->ctr + \
+ bp->temp_stats_blk->ctr)
+
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
- struct statistics_block *stats_blk = bp->stats_blk;
struct net_device_stats *net_stats = &dev->stats;
if (bp->stats_blk == NULL) {
return net_stats;
}
net_stats->rx_packets =
- GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
- GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
- GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
+ GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
+ GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
+ GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
net_stats->tx_packets =
- GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
- GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
- GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
+ GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
+ GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
+ GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
net_stats->rx_bytes =
- GET_NET_STATS(stats_blk->stat_IfHCInOctets);
+ GET_64BIT_NET_STATS(stat_IfHCInOctets);
net_stats->tx_bytes =
- GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
+ GET_64BIT_NET_STATS(stat_IfHCOutOctets);
net_stats->multicast =
- GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
+ GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);
net_stats->collisions =
- (unsigned long) stats_blk->stat_EtherStatsCollisions;
+ GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
net_stats->rx_length_errors =
- (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
- stats_blk->stat_EtherStatsOverrsizePkts);
+ GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
+ GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
net_stats->rx_over_errors =
- (unsigned long) (stats_blk->stat_IfInFTQDiscards +
- stats_blk->stat_IfInMBUFDiscards);
+ GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
+ GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
net_stats->rx_frame_errors =
- (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
+ GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
net_stats->rx_crc_errors =
- (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
+ GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
net_stats->rx_errors = net_stats->rx_length_errors +
net_stats->rx_over_errors + net_stats->rx_frame_errors +
net_stats->rx_crc_errors;
net_stats->tx_aborted_errors =
- (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
- stats_blk->stat_Dot3StatsLateCollisions);
+ GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
+ GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
(CHIP_ID(bp) == CHIP_ID_5708_A0))
net_stats->tx_carrier_errors = 0;
else {
net_stats->tx_carrier_errors =
- (unsigned long)
- stats_blk->stat_Dot3StatsCarrierSenseErrors;
+ GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
}
net_stats->tx_errors =
- (unsigned long)
- stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
- +
+ GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
net_stats->tx_aborted_errors +
net_stats->tx_carrier_errors;
net_stats->rx_missed_errors =
- (unsigned long) (stats_blk->stat_IfInFTQDiscards +
- stats_blk->stat_IfInMBUFDiscards + stats_blk->stat_FwRxDrop);
+ GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
+ GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
+ GET_32BIT_NET_STATS(stat_FwRxDrop);
return net_stats;
}
@@ -6717,32 +6751,15 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (cmd->autoneg == AUTONEG_ENABLE) {
autoneg |= AUTONEG_SPEED;
- cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
-
- /* allow advertising 1 speed */
- if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
- (cmd->advertising == ADVERTISED_10baseT_Full) ||
- (cmd->advertising == ADVERTISED_100baseT_Half) ||
- (cmd->advertising == ADVERTISED_100baseT_Full)) {
-
- if (cmd->port == PORT_FIBRE)
- goto err_out_unlock;
-
- advertising = cmd->advertising;
-
- } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
- if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
- (cmd->port == PORT_TP))
- goto err_out_unlock;
- } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
- advertising = cmd->advertising;
- else if (cmd->advertising == ADVERTISED_1000baseT_Half)
- goto err_out_unlock;
- else {
- if (cmd->port == PORT_FIBRE)
- advertising = ETHTOOL_ALL_FIBRE_SPEED;
- else
+ advertising = cmd->advertising;
+ if (cmd->port == PORT_TP) {
+ advertising &= ETHTOOL_ALL_COPPER_SPEED;
+ if (!advertising)
advertising = ETHTOOL_ALL_COPPER_SPEED;
+ } else {
+ advertising &= ETHTOOL_ALL_FIBRE_SPEED;
+ if (!advertising)
+ advertising = ETHTOOL_ALL_FIBRE_SPEED;
}
advertising |= ADVERTISED_Autoneg;
}
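The reworked autoneg path above simply masks the requested advertisement by what the selected port type can do and, if nothing survives the mask, falls back to advertising every supported speed. A minimal sketch of that pattern (the bit masks below are placeholders, not the real ethtool constants):

#include <stdint.h>

#define ALL_COPPER_SPEEDS 0x0000002fu	/* placeholder masks, not the  */
#define ALL_FIBRE_SPEEDS  0x00008400u	/* real ETHTOOL_ALL_* values   */
#define ADV_AUTONEG       0x00000040u

/* Keep only the speeds the port type supports; if the user asked for none
 * of them, advertise the whole set for that port. */
static uint32_t fixup_advertising(uint32_t requested, int is_fibre)
{
	uint32_t mask = is_fibre ? ALL_FIBRE_SPEEDS : ALL_COPPER_SPEEDS;
	uint32_t adv = requested & mask;

	if (!adv)
		adv = mask;
	return adv | ADV_AUTONEG;
}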
@@ -7083,6 +7100,9 @@ static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
if (netif_running(bp->dev)) {
+ /* Reset will erase chipset stats; save them */
+ bnx2_save_stats(bp);
+
bnx2_netif_stop(bp);
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
bnx2_free_skbs(bp);
@@ -7104,6 +7124,13 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
dev_close(bp->dev);
return rc;
}
+#ifdef BCM_CNIC
+ mutex_lock(&bp->cnic_lock);
+ /* Let cnic know about the new status block. */
+ if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
+ bnx2_setup_cnic_irq_info(bp);
+ mutex_unlock(&bp->cnic_lock);
+#endif
bnx2_netif_start(bp);
}
return 0;
@@ -7427,6 +7454,7 @@ bnx2_get_ethtool_stats(struct net_device *dev,
struct bnx2 *bp = netdev_priv(dev);
int i;
u32 *hw_stats = (u32 *) bp->stats_blk;
+ u32 *temp_stats = (u32 *) bp->temp_stats_blk;
u8 *stats_len_arr = NULL;
if (hw_stats == NULL) {
@@ -7443,21 +7471,26 @@ bnx2_get_ethtool_stats(struct net_device *dev,
stats_len_arr = bnx2_5708_stats_len_arr;
for (i = 0; i < BNX2_NUM_STATS; i++) {
+ unsigned long offset;
+
if (stats_len_arr[i] == 0) {
/* skip this counter */
buf[i] = 0;
continue;
}
+
+ offset = bnx2_stats_offset_arr[i];
if (stats_len_arr[i] == 4) {
/* 4-byte counter */
- buf[i] = (u64)
- *(hw_stats + bnx2_stats_offset_arr[i]);
+ buf[i] = (u64) *(hw_stats + offset) +
+ *(temp_stats + offset);
continue;
}
/* 8-byte counter */
- buf[i] = (((u64) *(hw_stats +
- bnx2_stats_offset_arr[i])) << 32) +
- *(hw_stats + bnx2_stats_offset_arr[i] + 1);
+ buf[i] = (((u64) *(hw_stats + offset)) << 32) +
+ *(hw_stats + offset + 1) +
+ (((u64) *(temp_stats + offset)) << 32) +
+ *(temp_stats + offset + 1);
}
}
@@ -7625,7 +7658,7 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
}
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
static void
poll_bnx2(struct net_device *dev)
{
@@ -7825,6 +7858,14 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->flags = 0;
bp->phy_flags = 0;
+ bp->temp_stats_blk =
+ kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
+
+ if (bp->temp_stats_blk == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
/* enable device (incl. PCI PM wakeup), and bus-mastering */
rc = pci_enable_device(pdev);
if (rc) {
@@ -8229,7 +8270,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
#ifdef BCM_VLAN
.ndo_vlan_rx_register = bnx2_vlan_rx_register,
#endif
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = poll_bnx2,
#endif
};
@@ -8346,6 +8387,8 @@ bnx2_remove_one(struct pci_dev *pdev)
if (bp->regview)
iounmap(bp->regview);
+ kfree(bp->temp_stats_blk);
+
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 939dc44d50a0..cd4b0e4637ab 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -349,7 +349,7 @@ struct l2_fhdr {
#define BNX2_L2CTX_BD_PRE_READ 0x00000000
#define BNX2_L2CTX_CTX_SIZE 0x00000000
#define BNX2_L2CTX_CTX_TYPE 0x00000000
-#define BNX2_L2CTX_LO_WATER_MARK_DEFAULT 32
+#define BNX2_L2CTX_LO_WATER_MARK_DEFAULT 4
#define BNX2_L2CTX_LO_WATER_MARK_SCALE 4
#define BNX2_L2CTX_LO_WATER_MARK_DIS 0
#define BNX2_L2CTX_HI_WATER_MARK_SHIFT 4
@@ -6851,6 +6851,7 @@ struct bnx2 {
dma_addr_t status_blk_mapping;
struct statistics_block *stats_blk;
+ struct statistics_block *temp_stats_blk;
dma_addr_t stats_blk_mapping;
int ctx_pages;
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 306c2b8165e2..6d8559052ee6 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -140,7 +140,7 @@ static struct {
};
-static const struct pci_device_id bnx2x_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
@@ -11471,7 +11471,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
rx_mode = BNX2X_RX_MODE_PROMISC;
else if ((dev->flags & IFF_ALLMULTI) ||
- ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
+ ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
+ CHIP_IS_E1(bp)))
rx_mode = BNX2X_RX_MODE_ALLMULTI;
else { /* some multicasts */
@@ -11482,7 +11483,7 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
bnx2x_sp(bp, mcast_config);
for (i = 0, mclist = dev->mc_list;
- mclist && (i < dev->mc_count);
+ mclist && (i < netdev_mc_count(dev));
i++, mclist = mclist->next) {
config->config_table[i].
@@ -11554,7 +11555,7 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
memset(mc_filter, 0, 4 * MC_HASH_SIZE);
for (i = 0, mclist = dev->mc_list;
- mclist && (i < dev->mc_count);
+ mclist && (i < netdev_mc_count(dev));
i++, mclist = mclist->next) {
DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
@@ -11731,7 +11732,7 @@ static void bnx2x_vlan_rx_register(struct net_device *dev,
#endif
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -11755,7 +11756,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
#ifdef BCM_VLAN
.ndo_vlan_rx_register = bnx2x_vlan_rx_register,
#endif
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = poll_bnx2x,
#endif
};
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index efa0e41bf3ec..1787e3c86573 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2615,6 +2615,17 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
unsigned char *arp_ptr;
__be32 sip, tip;
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ /*
+ * When using VLANs and bonding, dev and orig_dev may be
+ * incorrect if the physical interface supports VLAN
+ * acceleration. With this change, ARP validation now
+ * works for hosts only reachable on the VLAN interface.
+ */
+ dev = vlan_dev_real_dev(dev);
+ orig_dev = dev_get_by_index_rcu(dev_net(skb->dev), skb->skb_iif);
+ }
+
if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER))
goto out;
@@ -3296,7 +3307,7 @@ static void bond_remove_proc_entry(struct bonding *bond)
/* Create the bonding directory under /proc/net, if doesn't exist yet.
* Caller must hold rtnl_lock.
*/
-static void bond_create_proc_dir(struct bond_net *bn)
+static void __net_init bond_create_proc_dir(struct bond_net *bn)
{
if (!bn->proc_dir) {
bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
@@ -3309,7 +3320,7 @@ static void bond_create_proc_dir(struct bond_net *bn)
/* Destroy the bonding directory under /proc/net, if empty.
* Caller must hold rtnl_lock.
*/
-static void bond_destroy_proc_dir(struct bond_net *bn)
+static void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
{
if (bn->proc_dir) {
remove_proc_entry(DRV_NAME, bn->net->proc_net);
@@ -3327,11 +3338,11 @@ static void bond_remove_proc_entry(struct bonding *bond)
{
}
-static void bond_create_proc_dir(struct bond_net *bn)
+static inline void bond_create_proc_dir(struct bond_net *bn)
{
}
-static void bond_destroy_proc_dir(struct bond_net *bn)
+static inline void bond_destroy_proc_dir(struct bond_net *bn)
{
}
@@ -3731,7 +3742,7 @@ static int bond_close(struct net_device *bond_dev)
static struct net_device_stats *bond_get_stats(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct net_device_stats *stats = &bond->stats;
+ struct net_device_stats *stats = &bond_dev->stats;
struct net_device_stats local_stats;
struct slave *slave;
int i;
@@ -4944,7 +4955,7 @@ out_netdev:
goto out;
}
-static int bond_net_init(struct net *net)
+static int __net_init bond_net_init(struct net *net)
{
struct bond_net *bn = net_generic(net, bond_net_id);
@@ -4956,7 +4967,7 @@ static int bond_net_init(struct net *net)
return 0;
}
-static void bond_net_exit(struct net *net)
+static void __net_exit bond_net_exit(struct net *net)
{
struct bond_net *bn = net_generic(net, bond_net_id);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 558ec1352527..257a7a4dfce9 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -197,7 +197,6 @@ struct bonding {
s8 send_grat_arp;
s8 send_unsol_na;
s8 setup_by_slave;
- struct net_device_stats stats;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_entry;
char proc_file_name[IFNAMSIZ];
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 166cc7e579c0..a2f29a38798a 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -342,6 +342,9 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int mb, prio;
u32 reg_mid, reg_mcr;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
mb = get_tx_next_mb(priv);
prio = get_tx_next_prio(priv);
@@ -1070,6 +1073,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
priv->can.bittiming_const = &at91_bittiming_const;
priv->can.do_set_bittiming = at91_set_bittiming;
priv->can.do_set_mode = at91_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
priv->reg_base = addr;
priv->dev = dev;
priv->clk = clk;
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 0ec1524523cc..bf7f9ba2d903 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -318,6 +318,9 @@ static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
u16 val;
int i;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
netif_stop_queue(dev);
/* fill id */
@@ -600,6 +603,7 @@ struct net_device *alloc_bfin_candev(void)
priv->can.bittiming_const = &bfin_can_bittiming_const;
priv->can.do_set_bittiming = bfin_can_set_bittiming;
priv->can.do_set_mode = bfin_can_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
return dev;
}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index c1bb29f0322b..f08f1202ff00 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -592,6 +592,8 @@ static int can_changelink(struct net_device *dev,
if (dev->flags & IFF_UP)
return -EBUSY;
cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ if (cm->flags & ~priv->ctrlmode_supported)
+ return -EOPNOTSUPP;
priv->ctrlmode &= ~cm->mask;
priv->ctrlmode |= cm->flags;
}
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 1a72ca066a17..f8cc168ec76c 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -180,6 +180,14 @@
#define RXBEID0_OFF 4
#define RXBDLC_OFF 5
#define RXBDAT_OFF 6
+#define RXFSIDH(n) ((n) * 4)
+#define RXFSIDL(n) ((n) * 4 + 1)
+#define RXFEID8(n) ((n) * 4 + 2)
+#define RXFEID0(n) ((n) * 4 + 3)
+#define RXMSIDH(n) ((n) * 4 + 0x20)
+#define RXMSIDL(n) ((n) * 4 + 0x21)
+#define RXMEID8(n) ((n) * 4 + 0x22)
+#define RXMEID0(n) ((n) * 4 + 0x23)
#define GET_BYTE(val, byte) \
(((val) >> ((byte) * 8)) & 0xff)
@@ -219,7 +227,8 @@ struct mcp251x_priv {
struct net_device *net;
struct spi_device *spi;
- struct mutex spi_lock; /* SPI buffer lock */
+ struct mutex mcp_lock; /* SPI device lock */
+
u8 *spi_tx_buf;
u8 *spi_rx_buf;
dma_addr_t spi_tx_dma;
@@ -227,11 +236,11 @@ struct mcp251x_priv {
struct sk_buff *tx_skb;
int tx_len;
+
struct workqueue_struct *wq;
struct work_struct tx_work;
- struct work_struct irq_work;
- struct completion awake;
- int wake;
+ struct work_struct restart_work;
+
int force_quit;
int after_suspend;
#define AFTER_SUSPEND_UP 1
@@ -245,7 +254,8 @@ static void mcp251x_clean(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
- net->stats.tx_errors++;
+ if (priv->tx_skb || priv->tx_len)
+ net->stats.tx_errors++;
if (priv->tx_skb)
dev_kfree_skb(priv->tx_skb);
if (priv->tx_len)
@@ -300,16 +310,12 @@ static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
u8 val = 0;
- mutex_lock(&priv->spi_lock);
-
priv->spi_tx_buf[0] = INSTRUCTION_READ;
priv->spi_tx_buf[1] = reg;
mcp251x_spi_trans(spi, 3);
val = priv->spi_rx_buf[2];
- mutex_unlock(&priv->spi_lock);
-
return val;
}
@@ -317,15 +323,11 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
{
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
- mutex_lock(&priv->spi_lock);
-
priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
priv->spi_tx_buf[1] = reg;
priv->spi_tx_buf[2] = val;
mcp251x_spi_trans(spi, 3);
-
- mutex_unlock(&priv->spi_lock);
}
static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
@@ -333,16 +335,12 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
{
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
- mutex_lock(&priv->spi_lock);
-
priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY;
priv->spi_tx_buf[1] = reg;
priv->spi_tx_buf[2] = mask;
priv->spi_tx_buf[3] = val;
mcp251x_spi_trans(spi, 4);
-
- mutex_unlock(&priv->spi_lock);
}
static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
@@ -358,10 +356,8 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx) + i,
buf[i]);
} else {
- mutex_lock(&priv->spi_lock);
memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
mcp251x_spi_trans(spi, TXBDAT_OFF + len);
- mutex_unlock(&priv->spi_lock);
}
}
@@ -408,13 +404,9 @@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
for (; i < (RXBDAT_OFF + len); i++)
buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
} else {
- mutex_lock(&priv->spi_lock);
-
priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx);
mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN);
memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN);
-
- mutex_unlock(&priv->spi_lock);
}
}
@@ -467,21 +459,6 @@ static void mcp251x_hw_sleep(struct spi_device *spi)
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP);
}
-static void mcp251x_hw_wakeup(struct spi_device *spi)
-{
- struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
-
- priv->wake = 1;
-
- /* Can only wake up by generating a wake-up interrupt. */
- mcp251x_write_bits(spi, CANINTE, CANINTE_WAKIE, CANINTE_WAKIE);
- mcp251x_write_bits(spi, CANINTF, CANINTF_WAKIF, CANINTF_WAKIF);
-
- /* Wait until the device is awake */
- if (!wait_for_completion_timeout(&priv->awake, HZ))
- dev_err(&spi->dev, "MCP251x didn't wake-up\n");
-}
-
static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
struct net_device *net)
{
@@ -490,16 +467,11 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
if (priv->tx_skb || priv->tx_len) {
dev_warn(&spi->dev, "hard_xmit called while tx busy\n");
- netif_stop_queue(net);
return NETDEV_TX_BUSY;
}
- if (skb->len != sizeof(struct can_frame)) {
- dev_err(&spi->dev, "dropping packet - bad length\n");
- dev_kfree_skb(skb);
- net->stats.tx_dropped++;
+ if (can_dropped_invalid_skb(net, skb))
return NETDEV_TX_OK;
- }
netif_stop_queue(net);
priv->tx_skb = skb;
@@ -515,12 +487,13 @@ static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
switch (mode) {
case CAN_MODE_START:
+ mcp251x_clean(net);
/* We have to delay work since SPI I/O may sleep */
priv->can.state = CAN_STATE_ERROR_ACTIVE;
priv->restart_tx = 1;
if (priv->can.restart_ms == 0)
priv->after_suspend = AFTER_SUSPEND_RESTART;
- queue_work(priv->wq, &priv->irq_work);
+ queue_work(priv->wq, &priv->restart_work);
break;
default:
return -EOPNOTSUPP;
@@ -529,7 +502,7 @@ static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
return 0;
}
-static void mcp251x_set_normal_mode(struct spi_device *spi)
+static int mcp251x_set_normal_mode(struct spi_device *spi)
{
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
unsigned long timeout;
@@ -537,12 +510,14 @@ static void mcp251x_set_normal_mode(struct spi_device *spi)
/* Enable interrupts */
mcp251x_write_reg(spi, CANINTE,
CANINTE_ERRIE | CANINTE_TX2IE | CANINTE_TX1IE |
- CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE |
- CANINTF_MERRF);
+ CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE);
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
/* Put device into loopback mode */
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK);
+ } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ /* Put device into listen-only mode */
+ mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LISTEN_ONLY);
} else {
/* Put device into normal mode */
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL);
@@ -554,11 +529,12 @@ static void mcp251x_set_normal_mode(struct spi_device *spi)
if (time_after(jiffies, timeout)) {
dev_err(&spi->dev, "MCP251x didn't"
" enter in normal mode\n");
- return;
+ return -EBUSY;
}
}
}
priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ return 0;
}
static int mcp251x_do_set_bittiming(struct net_device *net)
@@ -589,33 +565,39 @@ static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
{
mcp251x_do_set_bittiming(net);
- /* Enable RX0->RX1 buffer roll over and disable filters */
- mcp251x_write_bits(spi, RXBCTRL(0),
- RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1,
- RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1);
- mcp251x_write_bits(spi, RXBCTRL(1),
- RXBCTRL_RXM0 | RXBCTRL_RXM1,
- RXBCTRL_RXM0 | RXBCTRL_RXM1);
+ mcp251x_write_reg(spi, RXBCTRL(0),
+ RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1);
+ mcp251x_write_reg(spi, RXBCTRL(1),
+ RXBCTRL_RXM0 | RXBCTRL_RXM1);
return 0;
}
-static void mcp251x_hw_reset(struct spi_device *spi)
+static int mcp251x_hw_reset(struct spi_device *spi)
{
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
int ret;
-
- mutex_lock(&priv->spi_lock);
+ unsigned long timeout;
priv->spi_tx_buf[0] = INSTRUCTION_RESET;
-
ret = spi_write(spi, priv->spi_tx_buf, 1);
-
- mutex_unlock(&priv->spi_lock);
-
- if (ret)
+ if (ret) {
dev_err(&spi->dev, "reset failed: ret = %d\n", ret);
+ return -EIO;
+ }
+
/* Wait for reset to finish */
+ timeout = jiffies + HZ;
mdelay(10);
+ while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK)
+ != CANCTRL_REQOP_CONF) {
+ schedule();
+ if (time_after(jiffies, timeout)) {
+ dev_err(&spi->dev, "MCP251x didn't"
+ " enter in conf mode after reset\n");
+ return -EBUSY;
+ }
+ }
+ return 0;
}
static int mcp251x_hw_probe(struct spi_device *spi)
@@ -639,63 +621,17 @@ static int mcp251x_hw_probe(struct spi_device *spi)
return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
}
-static irqreturn_t mcp251x_can_isr(int irq, void *dev_id)
-{
- struct net_device *net = (struct net_device *)dev_id;
- struct mcp251x_priv *priv = netdev_priv(net);
-
- /* Schedule bottom half */
- if (!work_pending(&priv->irq_work))
- queue_work(priv->wq, &priv->irq_work);
-
- return IRQ_HANDLED;
-}
-
-static int mcp251x_open(struct net_device *net)
+static void mcp251x_open_clean(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
struct mcp251x_platform_data *pdata = spi->dev.platform_data;
- int ret;
-
- ret = open_candev(net);
- if (ret) {
- dev_err(&spi->dev, "unable to set initial baudrate!\n");
- return ret;
- }
+ free_irq(spi->irq, priv);
+ mcp251x_hw_sleep(spi);
if (pdata->transceiver_enable)
- pdata->transceiver_enable(1);
-
- priv->force_quit = 0;
- priv->tx_skb = NULL;
- priv->tx_len = 0;
-
- ret = request_irq(spi->irq, mcp251x_can_isr,
- IRQF_TRIGGER_FALLING, DEVICE_NAME, net);
- if (ret) {
- dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
- close_candev(net);
- return ret;
- }
-
- mcp251x_hw_wakeup(spi);
- mcp251x_hw_reset(spi);
- ret = mcp251x_setup(net, priv, spi);
- if (ret) {
- free_irq(spi->irq, net);
- mcp251x_hw_sleep(spi);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
- close_candev(net);
- return ret;
- }
- mcp251x_set_normal_mode(spi);
- netif_wake_queue(net);
-
- return 0;
+ pdata->transceiver_enable(0);
+ close_candev(net);
}
static int mcp251x_stop(struct net_device *net)
@@ -706,17 +642,19 @@ static int mcp251x_stop(struct net_device *net)
close_candev(net);
+ priv->force_quit = 1;
+ free_irq(spi->irq, priv);
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+
+ mutex_lock(&priv->mcp_lock);
+
/* Disable and clear pending interrupts */
mcp251x_write_reg(spi, CANINTE, 0x00);
mcp251x_write_reg(spi, CANINTF, 0x00);
- priv->force_quit = 1;
- free_irq(spi->irq, net);
- flush_workqueue(priv->wq);
-
mcp251x_write_reg(spi, TXBCTRL(0), 0);
- if (priv->tx_skb || priv->tx_len)
- mcp251x_clean(net);
+ mcp251x_clean(net);
mcp251x_hw_sleep(spi);
@@ -725,9 +663,27 @@ static int mcp251x_stop(struct net_device *net)
priv->can.state = CAN_STATE_STOPPED;
+ mutex_unlock(&priv->mcp_lock);
+
return 0;
}
+static void mcp251x_error_skb(struct net_device *net, int can_id, int data1)
+{
+ struct sk_buff *skb;
+ struct can_frame *frame;
+
+ skb = alloc_can_err_skb(net, &frame);
+ if (skb) {
+ frame->can_id = can_id;
+ frame->data[1] = data1;
+ netif_rx(skb);
+ } else {
+ dev_err(&net->dev,
+ "cannot allocate error skb\n");
+ }
+}
+
static void mcp251x_tx_work_handler(struct work_struct *ws)
{
struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
@@ -736,33 +692,32 @@ static void mcp251x_tx_work_handler(struct work_struct *ws)
struct net_device *net = priv->net;
struct can_frame *frame;
+ mutex_lock(&priv->mcp_lock);
if (priv->tx_skb) {
- frame = (struct can_frame *)priv->tx_skb->data;
-
if (priv->can.state == CAN_STATE_BUS_OFF) {
mcp251x_clean(net);
- netif_wake_queue(net);
- return;
+ } else {
+ frame = (struct can_frame *)priv->tx_skb->data;
+
+ if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
+ frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
+ mcp251x_hw_tx(spi, frame, 0);
+ priv->tx_len = 1 + frame->can_dlc;
+ can_put_echo_skb(priv->tx_skb, net, 0);
+ priv->tx_skb = NULL;
}
- if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
- frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
- mcp251x_hw_tx(spi, frame, 0);
- priv->tx_len = 1 + frame->can_dlc;
- can_put_echo_skb(priv->tx_skb, net, 0);
- priv->tx_skb = NULL;
}
+ mutex_unlock(&priv->mcp_lock);
}
-static void mcp251x_irq_work_handler(struct work_struct *ws)
+static void mcp251x_restart_work_handler(struct work_struct *ws)
{
struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
- irq_work);
+ restart_work);
struct spi_device *spi = priv->spi;
struct net_device *net = priv->net;
- u8 txbnctrl;
- u8 intf;
- enum can_state new_state;
+ mutex_lock(&priv->mcp_lock);
if (priv->after_suspend) {
mdelay(10);
mcp251x_hw_reset(spi);
@@ -771,45 +726,54 @@ static void mcp251x_irq_work_handler(struct work_struct *ws)
mcp251x_set_normal_mode(spi);
} else if (priv->after_suspend & AFTER_SUSPEND_UP) {
netif_device_attach(net);
- /* Clean since we lost tx buffer */
- if (priv->tx_skb || priv->tx_len) {
- mcp251x_clean(net);
- netif_wake_queue(net);
- }
+ mcp251x_clean(net);
mcp251x_set_normal_mode(spi);
+ netif_wake_queue(net);
} else {
mcp251x_hw_sleep(spi);
}
priv->after_suspend = 0;
+ priv->force_quit = 0;
}
- if (priv->can.restart_ms == 0 && priv->can.state == CAN_STATE_BUS_OFF)
- return;
+ if (priv->restart_tx) {
+ priv->restart_tx = 0;
+ mcp251x_write_reg(spi, TXBCTRL(0), 0);
+ mcp251x_clean(net);
+ netif_wake_queue(net);
+ mcp251x_error_skb(net, CAN_ERR_RESTARTED, 0);
+ }
+ mutex_unlock(&priv->mcp_lock);
+}
- while (!priv->force_quit && !freezing(current)) {
- u8 eflag = mcp251x_read_reg(spi, EFLG);
- int can_id = 0, data1 = 0;
+static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
+{
+ struct mcp251x_priv *priv = dev_id;
+ struct spi_device *spi = priv->spi;
+ struct net_device *net = priv->net;
- mcp251x_write_reg(spi, EFLG, 0x00);
+ mutex_lock(&priv->mcp_lock);
+ while (!priv->force_quit) {
+ enum can_state new_state;
+ u8 intf = mcp251x_read_reg(spi, CANINTF);
+ u8 eflag;
+ int can_id = 0, data1 = 0;
- if (priv->restart_tx) {
- priv->restart_tx = 0;
- mcp251x_write_reg(spi, TXBCTRL(0), 0);
- if (priv->tx_skb || priv->tx_len)
- mcp251x_clean(net);
- netif_wake_queue(net);
- can_id |= CAN_ERR_RESTARTED;
+ if (intf & CANINTF_RX0IF) {
+ mcp251x_hw_rx(spi, 0);
+ /* Free one buffer ASAP */
+ mcp251x_write_bits(spi, CANINTF, intf & CANINTF_RX0IF,
+ 0x00);
}
- if (priv->wake) {
- /* Wait whilst the device wakes up */
- mdelay(10);
- priv->wake = 0;
- }
+ if (intf & CANINTF_RX1IF)
+ mcp251x_hw_rx(spi, 1);
- intf = mcp251x_read_reg(spi, CANINTF);
mcp251x_write_bits(spi, CANINTF, intf, 0x00);
+ eflag = mcp251x_read_reg(spi, EFLG);
+ mcp251x_write_reg(spi, EFLG, 0x00);
+
/* Update can state */
if (eflag & EFLG_TXBO) {
new_state = CAN_STATE_BUS_OFF;
@@ -850,59 +814,31 @@ static void mcp251x_irq_work_handler(struct work_struct *ws)
}
priv->can.state = new_state;
- if ((intf & CANINTF_ERRIF) || (can_id & CAN_ERR_RESTARTED)) {
- struct sk_buff *skb;
- struct can_frame *frame;
-
- /* Create error frame */
- skb = alloc_can_err_skb(net, &frame);
- if (skb) {
- /* Set error frame flags based on bus state */
- frame->can_id = can_id;
- frame->data[1] = data1;
-
- /* Update net stats for overflows */
- if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
- if (eflag & EFLG_RX0OVR)
- net->stats.rx_over_errors++;
- if (eflag & EFLG_RX1OVR)
- net->stats.rx_over_errors++;
- frame->can_id |= CAN_ERR_CRTL;
- frame->data[1] |=
- CAN_ERR_CRTL_RX_OVERFLOW;
- }
-
- netif_rx(skb);
- } else {
- dev_info(&spi->dev,
- "cannot allocate error skb\n");
+ if (intf & CANINTF_ERRIF) {
+ /* Handle overflow counters */
+ if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
+ if (eflag & EFLG_RX0OVR)
+ net->stats.rx_over_errors++;
+ if (eflag & EFLG_RX1OVR)
+ net->stats.rx_over_errors++;
+ can_id |= CAN_ERR_CRTL;
+ data1 |= CAN_ERR_CRTL_RX_OVERFLOW;
}
+ mcp251x_error_skb(net, can_id, data1);
}
if (priv->can.state == CAN_STATE_BUS_OFF) {
if (priv->can.restart_ms == 0) {
+ priv->force_quit = 1;
can_bus_off(net);
mcp251x_hw_sleep(spi);
- return;
+ break;
}
}
if (intf == 0)
break;
- if (intf & CANINTF_WAKIF)
- complete(&priv->awake);
-
- if (intf & CANINTF_MERRF) {
- /* If there are pending Tx buffers, restart queue */
- txbnctrl = mcp251x_read_reg(spi, TXBCTRL(0));
- if (!(txbnctrl & TXBCTRL_TXREQ)) {
- if (priv->tx_skb || priv->tx_len)
- mcp251x_clean(net);
- netif_wake_queue(net);
- }
- }
-
if (intf & (CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF)) {
net->stats.tx_packets++;
net->stats.tx_bytes += priv->tx_len - 1;
@@ -913,12 +849,66 @@ static void mcp251x_irq_work_handler(struct work_struct *ws)
netif_wake_queue(net);
}
- if (intf & CANINTF_RX0IF)
- mcp251x_hw_rx(spi, 0);
+ }
+ mutex_unlock(&priv->mcp_lock);
+ return IRQ_HANDLED;
+}
- if (intf & CANINTF_RX1IF)
- mcp251x_hw_rx(spi, 1);
+static int mcp251x_open(struct net_device *net)
+{
+ struct mcp251x_priv *priv = netdev_priv(net);
+ struct spi_device *spi = priv->spi;
+ struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+ int ret;
+
+ ret = open_candev(net);
+ if (ret) {
+ dev_err(&spi->dev, "unable to set initial baudrate!\n");
+ return ret;
+ }
+
+ mutex_lock(&priv->mcp_lock);
+ if (pdata->transceiver_enable)
+ pdata->transceiver_enable(1);
+
+ priv->force_quit = 0;
+ priv->tx_skb = NULL;
+ priv->tx_len = 0;
+
+ ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
+ IRQF_TRIGGER_FALLING, DEVICE_NAME, priv);
+ if (ret) {
+ dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
+ if (pdata->transceiver_enable)
+ pdata->transceiver_enable(0);
+ close_candev(net);
+ goto open_unlock;
+ }
+
+ priv->wq = create_freezeable_workqueue("mcp251x_wq");
+ INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
+ INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
+
+ ret = mcp251x_hw_reset(spi);
+ if (ret) {
+ mcp251x_open_clean(net);
+ goto open_unlock;
+ }
+ ret = mcp251x_setup(net, priv, spi);
+ if (ret) {
+ mcp251x_open_clean(net);
+ goto open_unlock;
}
+ ret = mcp251x_set_normal_mode(spi);
+ if (ret) {
+ mcp251x_open_clean(net);
+ goto open_unlock;
+ }
+ netif_wake_queue(net);
+
+open_unlock:
+ mutex_unlock(&priv->mcp_lock);
+ return ret;
}
static const struct net_device_ops mcp251x_netdev_ops = {
@@ -952,11 +942,13 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
priv->can.bittiming_const = &mcp251x_bittiming_const;
priv->can.do_set_mode = mcp251x_do_set_mode;
priv->can.clock.freq = pdata->oscillator_frequency / 2;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
priv->net = net;
dev_set_drvdata(&spi->dev, priv);
priv->spi = spi;
- mutex_init(&priv->spi_lock);
+ mutex_init(&priv->mcp_lock);
/* If requested, allocate DMA buffers */
if (mcp251x_enable_dma) {
@@ -1005,18 +997,12 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
SET_NETDEV_DEV(net, &spi->dev);
- priv->wq = create_freezeable_workqueue("mcp251x_wq");
-
- INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
- INIT_WORK(&priv->irq_work, mcp251x_irq_work_handler);
-
- init_completion(&priv->awake);
-
/* Configure the SPI bus */
spi->mode = SPI_MODE_0;
spi->bits_per_word = 8;
spi_setup(spi);
+ /* It is OK not to take the MCP lock here; no one knows about the device yet */
if (!mcp251x_hw_probe(spi)) {
dev_info(&spi->dev, "Probe failed\n");
goto error_probe;
@@ -1059,10 +1045,6 @@ static int __devexit mcp251x_can_remove(struct spi_device *spi)
unregister_candev(net);
free_candev(net);
- priv->force_quit = 1;
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
-
if (mcp251x_enable_dma) {
dma_free_coherent(&spi->dev, PAGE_SIZE,
priv->spi_tx_buf, priv->spi_tx_dma);
@@ -1084,6 +1066,12 @@ static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state)
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
struct net_device *net = priv->net;
+ priv->force_quit = 1;
+ disable_irq(spi->irq);
+ /*
+ * Note: at this point neither IST nor workqueues are running.
+ * open/stop cannot be called anyway so locking is not needed
+ */
if (netif_running(net)) {
netif_device_detach(net);
@@ -1110,16 +1098,18 @@ static int mcp251x_can_resume(struct spi_device *spi)
if (priv->after_suspend & AFTER_SUSPEND_POWER) {
pdata->power_enable(1);
- queue_work(priv->wq, &priv->irq_work);
+ queue_work(priv->wq, &priv->restart_work);
} else {
if (priv->after_suspend & AFTER_SUSPEND_UP) {
if (pdata->transceiver_enable)
pdata->transceiver_enable(1);
- queue_work(priv->wq, &priv->irq_work);
+ queue_work(priv->wq, &priv->restart_work);
} else {
priv->after_suspend = 0;
}
}
+ priv->force_quit = 0;
+ enable_irq(spi->irq);
return 0;
}
#else
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index cd0f2d6f375d..27d1d398e25e 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -11,12 +11,13 @@ if CAN_MSCAN
config CAN_MPC5XXX
tristate "Freescale MPC5xxx onboard CAN controller"
- depends on PPC_MPC52xx
+ depends on (PPC_MPC52xx || PPC_MPC512x)
---help---
If you say yes here you get support for Freescale's MPC5xxx
- onboard CAN controller.
+ onboard CAN controller. Currently, the MPC5200, MPC5200B and
+ MPC5121 (Rev. 2 and later) are supported.
- This driver can also be built as a module. If so, the module
+ This driver can also be built as a module. If so, the module
will be called mscan-mpc5xxx.ko.
endif
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 1de6f6349b16..03e7c48465a2 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -29,6 +29,7 @@
#include <linux/can/dev.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
+#include <linux/clk.h>
#include <linux/io.h>
#include <asm/mpc52xx.h>
@@ -36,22 +37,21 @@
#define DRV_NAME "mpc5xxx_can"
-static struct of_device_id mpc52xx_cdm_ids[] __devinitdata = {
+struct mpc5xxx_can_data {
+ unsigned int type;
+ u32 (*get_clock)(struct of_device *ofdev, const char *clock_name,
+ int *mscan_clksrc);
+};
+
+#ifdef CONFIG_PPC_MPC52xx
+static struct of_device_id __devinitdata mpc52xx_cdm_ids[] = {
{ .compatible = "fsl,mpc5200-cdm", },
{}
};
-/*
- * Get frequency of the MSCAN clock source
- *
- * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock (IP_CLK)
- * can be selected. According to the MPC5200 user's manual, the oscillator
- * clock is the better choice as it has less jitter but due to a hardware
- * bug, it can not be selected for the old MPC5200 Rev. A chips.
- */
-
-static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
- int clock_src)
+static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
+ const char *clock_name,
+ int *mscan_clksrc)
{
unsigned int pvr;
struct mpc52xx_cdm __iomem *cdm;
@@ -61,21 +61,33 @@ static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
pvr = mfspr(SPRN_PVR);
- freq = mpc5xxx_get_bus_frequency(of->node);
+ /*
+ * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
+ * (IP_CLK) can be selected as MSCAN clock source. According to
+ * the MPC5200 user's manual, the oscillator clock is the better
+ * choice as it has less jitter. For this reason, it is selected
+ * by default. Unfortunately, it can not be selected for the old
+ * MPC5200 Rev. A chips due to a hardware bug (check errata).
+ */
+ if (clock_name && strcmp(clock_name, "ip") == 0)
+ *mscan_clksrc = MSCAN_CLKSRC_BUS;
+ else
+ *mscan_clksrc = MSCAN_CLKSRC_XTAL;
+
+ freq = mpc5xxx_get_bus_frequency(ofdev->node);
if (!freq)
return 0;
- if (clock_src == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
+ if (*mscan_clksrc == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
return freq;
/* Determine SYS_XTAL_IN frequency from the clock domain settings */
np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids);
if (!np_cdm) {
- dev_err(&of->dev, "can't get clock node!\n");
+ dev_err(&ofdev->dev, "can't get clock node!\n");
return 0;
}
cdm = of_iomap(np_cdm, 0);
- of_node_put(np_cdm);
if (in_8(&cdm->ipb_clk_sel) & 0x1)
freq *= 2;
@@ -84,26 +96,174 @@ static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
freq *= (val & (1 << 5)) ? 8 : 4;
freq /= (val & (1 << 6)) ? 12 : 16;
+ of_node_put(np_cdm);
iounmap(cdm);
return freq;
}
+#else /* !CONFIG_PPC_MPC52xx */
+static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
+ const char *clock_name,
+ int *mscan_clksrc)
+{
+ return 0;
+}
+#endif /* CONFIG_PPC_MPC52xx */
+
+#ifdef CONFIG_PPC_MPC512x
+struct mpc512x_clockctl {
+ u32 spmr; /* System PLL Mode Reg */
+ u32 sccr[2]; /* System Clk Ctrl Reg 1 & 2 */
+ u32 scfr1; /* System Clk Freq Reg 1 */
+ u32 scfr2; /* System Clk Freq Reg 2 */
+ u32 reserved;
+ u32 bcr; /* Bread Crumb Reg */
+ u32 pccr[12]; /* PSC Clk Ctrl Reg 0-11 */
+ u32 spccr; /* SPDIF Clk Ctrl Reg */
+ u32 cccr; /* CFM Clk Ctrl Reg */
+ u32 dccr; /* DIU Clk Cnfg Reg */
+ u32 mccr[4]; /* MSCAN Clk Ctrl Reg 1-3 */
+};
+
+static struct of_device_id __devinitdata mpc512x_clock_ids[] = {
+ { .compatible = "fsl,mpc5121-clock", },
+ {}
+};
+
+static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
+ const char *clock_name,
+ int *mscan_clksrc)
+{
+ struct mpc512x_clockctl __iomem *clockctl;
+ struct device_node *np_clock;
+ struct clk *sys_clk, *ref_clk;
+ int plen, clockidx, clocksrc = -1;
+ u32 sys_freq, val, clockdiv = 1, freq = 0;
+ const u32 *pval;
+
+ np_clock = of_find_matching_node(NULL, mpc512x_clock_ids);
+ if (!np_clock) {
+ dev_err(&ofdev->dev, "couldn't find clock node\n");
+ return -ENODEV;
+ }
+ clockctl = of_iomap(np_clock, 0);
+ if (!clockctl) {
+ dev_err(&ofdev->dev, "couldn't map clock registers\n");
+ return 0;
+ }
+
+ /* Determine the MSCAN device index from the physical address */
+ pval = of_get_property(ofdev->node, "reg", &plen);
+ BUG_ON(!pval || plen < sizeof(*pval));
+ clockidx = (*pval & 0x80) ? 1 : 0;
+ if (*pval & 0x2000)
+ clockidx += 2;
+
+ /*
+ * Clock source and divider selection: 3 different clock sources
+ * can be selected: "ip", "ref" or "sys". For the latter two, a
+ * clock divider can be defined as well. If the clock source is
+ * not specified by the device tree, we first try to find an
+ * optimal CAN source clock based on the system clock. If that
+ * is not possible, the reference clock will be used.
+ */
+ if (clock_name && !strcmp(clock_name, "ip")) {
+ *mscan_clksrc = MSCAN_CLKSRC_IPS;
+ freq = mpc5xxx_get_bus_frequency(ofdev->node);
+ } else {
+ *mscan_clksrc = MSCAN_CLKSRC_BUS;
+
+ pval = of_get_property(ofdev->node,
+ "fsl,mscan-clock-divider", &plen);
+ if (pval && plen == sizeof(*pval))
+ clockdiv = *pval;
+ if (!clockdiv)
+ clockdiv = 1;
+
+ if (!clock_name || !strcmp(clock_name, "sys")) {
+ sys_clk = clk_get(&ofdev->dev, "sys_clk");
+ if (!sys_clk) {
+ dev_err(&ofdev->dev, "couldn't get sys_clk\n");
+ goto exit_unmap;
+ }
+ /* Get and round up/down sys clock rate */
+ sys_freq = 1000000 *
+ ((clk_get_rate(sys_clk) + 499999) / 1000000);
+
+ if (!clock_name) {
+ /* A multiple of 16 MHz would be optimal */
+ if ((sys_freq % 16000000) == 0) {
+ clocksrc = 0;
+ clockdiv = sys_freq / 16000000;
+ freq = sys_freq / clockdiv;
+ }
+ } else {
+ clocksrc = 0;
+ freq = sys_freq / clockdiv;
+ }
+ }
+
+ if (clocksrc < 0) {
+ ref_clk = clk_get(&ofdev->dev, "ref_clk");
+ if (!ref_clk) {
+ dev_err(&ofdev->dev, "couldn't get ref_clk\n");
+ goto exit_unmap;
+ }
+ clocksrc = 1;
+ freq = clk_get_rate(ref_clk) / clockdiv;
+ }
+ }
+
+ /* Disable clock */
+ out_be32(&clockctl->mccr[clockidx], 0x0);
+ if (clocksrc >= 0) {
+ /* Set source and divider */
+ val = (clocksrc << 14) | ((clockdiv - 1) << 17);
+ out_be32(&clockctl->mccr[clockidx], val);
+ /* Enable clock */
+ out_be32(&clockctl->mccr[clockidx], val | 0x10000);
+ }
+
+ /* Enable MSCAN clock domain */
+ val = in_be32(&clockctl->sccr[1]);
+ if (!(val & (1 << 25)))
+ out_be32(&clockctl->sccr[1], val | (1 << 25));
+
+ dev_dbg(&ofdev->dev, "using '%s' with frequency divider %d\n",
+ *mscan_clksrc == MSCAN_CLKSRC_IPS ? "ips_clk" :
+ clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv);
+
+exit_unmap:
+ of_node_put(np_clock);
+ iounmap(clockctl);
+
+ return freq;
+}
+#else /* !CONFIG_PPC_MPC512x */
+static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
+ const char *clock_name,
+ int *mscan_clksrc)
+{
+ return 0;
+}
+#endif /* CONFIG_PPC_MPC512x */
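When no clock source is given in the device tree, mpc512x_can_get_clock() above prefers dividing the system clock down to a multiple of 16 MHz and only falls back to the reference clock when that is not possible. A self-contained sketch of that default heuristic (function and values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define TARGET_HZ 16000000u	/* a multiple of 16 MHz is the preferred MSCAN clock */

/* Divide the system clock down to 16 MHz when it is an exact multiple,
 * otherwise fall back to the reference clock as-is. */
static uint32_t pick_can_clock(uint32_t sys_hz, uint32_t ref_hz, uint32_t *div)
{
	if (sys_hz % TARGET_HZ == 0) {
		*div = sys_hz / TARGET_HZ;
		return sys_hz / *div;	/* == 16 MHz */
	}
	*div = 1;
	return ref_hz;
}

int main(void)
{
	uint32_t div;
	uint32_t freq = pick_can_clock(400000000u, 33000000u, &div);

	printf("CAN clock %u Hz, divider %u\n", freq, div);
	return 0;
}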
static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
const struct of_device_id *id)
{
+ struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data;
struct device_node *np = ofdev->node;
struct net_device *dev;
struct mscan_priv *priv;
void __iomem *base;
- const char *clk_src;
- int err, irq, clock_src;
+ const char *clock_name = NULL;
+ int irq, mscan_clksrc = 0;
+ int err = -ENOMEM;
- base = of_iomap(ofdev->node, 0);
+ base = of_iomap(np, 0);
if (!base) {
dev_err(&ofdev->dev, "couldn't ioremap\n");
- err = -ENOMEM;
- goto exit_release_mem;
+ return err;
}
irq = irq_of_parse_and_map(np, 0);
@@ -114,37 +274,27 @@ static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
}
dev = alloc_mscandev();
- if (!dev) {
- err = -ENOMEM;
+ if (!dev)
goto exit_dispose_irq;
- }
priv = netdev_priv(dev);
priv->reg_base = base;
dev->irq = irq;
- /*
- * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
- * (IP_CLK) can be selected as MSCAN clock source. According to
- * the MPC5200 user's manual, the oscillator clock is the better
- * choice as it has less jitter. For this reason, it is selected
- * by default.
- */
- clk_src = of_get_property(np, "fsl,mscan-clock-source", NULL);
- if (clk_src && strcmp(clk_src, "ip") == 0)
- clock_src = MSCAN_CLKSRC_BUS;
- else
- clock_src = MSCAN_CLKSRC_XTAL;
- priv->can.clock.freq = mpc52xx_can_clock_freq(ofdev, clock_src);
+ clock_name = of_get_property(np, "fsl,mscan-clock-source", NULL);
+
+ BUG_ON(!data);
+ priv->type = data->type;
+ priv->can.clock.freq = data->get_clock(ofdev, clock_name,
+ &mscan_clksrc);
if (!priv->can.clock.freq) {
- dev_err(&ofdev->dev, "couldn't get MSCAN clock frequency\n");
- err = -ENODEV;
+ dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n");
goto exit_free_mscan;
}
SET_NETDEV_DEV(dev, &ofdev->dev);
- err = register_mscandev(dev, clock_src);
+ err = register_mscandev(dev, mscan_clksrc);
if (err) {
dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
DRV_NAME, err);
@@ -164,7 +314,7 @@ exit_dispose_irq:
irq_dispose_mapping(irq);
exit_unmap_mem:
iounmap(base);
-exit_release_mem:
+
return err;
}
@@ -225,8 +375,20 @@ static int mpc5xxx_can_resume(struct of_device *ofdev)
}
#endif
+static struct mpc5xxx_can_data __devinitdata mpc5200_can_data = {
+ .type = MSCAN_TYPE_MPC5200,
+ .get_clock = mpc52xx_can_get_clock,
+};
+
+static struct mpc5xxx_can_data __devinitdata mpc5121_can_data = {
+ .type = MSCAN_TYPE_MPC5121,
+ .get_clock = mpc512x_can_get_clock,
+};
+
static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
- {.compatible = "fsl,mpc5200-mscan"},
+ { .compatible = "fsl,mpc5200-mscan", .data = &mpc5200_can_data, },
+ /* Note that only MPC5121 Rev. 2 (and later) is supported */
+ { .compatible = "fsl,mpc5121-mscan", .data = &mpc5121_can_data, },
{},
};
@@ -255,5 +417,5 @@ static void __exit mpc5xxx_can_exit(void)
module_exit(mpc5xxx_can_exit);
MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
-MODULE_DESCRIPTION("Freescale MPC5200 CAN driver");
+MODULE_DESCRIPTION("Freescale MPC5xxx CAN driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 07346f880ca6..6b7dd578d417 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -4,7 +4,7 @@
* Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
* Varma Electronics Oy
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
- * Copytight (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
+ * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the version 2 of the GNU General Public License
@@ -152,6 +152,12 @@ static int mscan_start(struct net_device *dev)
priv->shadow_canrier = 0;
priv->flags = 0;
+ if (priv->type == MSCAN_TYPE_MPC5121) {
+ /* Clear pending bus-off condition */
+ if (in_8(&regs->canmisc) & MSCAN_BOHOLD)
+ out_8(&regs->canmisc, MSCAN_BOHOLD);
+ }
+
err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
if (err)
return err;
@@ -163,8 +169,29 @@ static int mscan_start(struct net_device *dev)
out_8(&regs->cantier, 0);
/* Enable receive interrupts. */
- out_8(&regs->canrier, MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE |
- MSCAN_RSTATE1 | MSCAN_RSTATE0 | MSCAN_TSTATE1 | MSCAN_TSTATE0);
+ out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
+
+ return 0;
+}
+
+static int mscan_restart(struct net_device *dev)
+{
+ struct mscan_priv *priv = netdev_priv(dev);
+
+ if (priv->type == MSCAN_TYPE_MPC5121) {
+ struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
+ "bus-off state expected");
+ out_8(&regs->canmisc, MSCAN_BOHOLD);
+ /* Re-enable receive interrupts. */
+ out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
+ } else {
+ if (priv->can.state <= CAN_STATE_BUS_OFF)
+ mscan_set_mode(dev, MSCAN_INIT_MODE);
+ return mscan_start(dev);
+ }
return 0;
}
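The MPC5121 branch above works together with MSCAN_BORM, which register_mscandev() (further down) sets at registration time: after a bus-off event the controller is held in that state until software acknowledges it by writing MSCAN_BOHOLD. With the interface up and in bus-off, the restart can then be requested from user space; assuming an iproute2 build with CAN netlink support and an interface named can0, something like

	ip link set can0 type can restart

ends up in mscan_do_set_mode(CAN_MODE_START) and thus in mscan_restart().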
@@ -177,8 +204,8 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i, rtr, buf_id;
u32 can_id;
- if (frame->can_dlc > 8)
- return -EINVAL;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
out_8(&regs->cantier, 0);
@@ -359,9 +386,12 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
* automatically. To avoid that we stop the chip doing
* a light-weight stop (we are in irq-context).
*/
- out_8(&regs->cantier, 0);
- out_8(&regs->canrier, 0);
- setbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
+ if (priv->type != MSCAN_TYPE_MPC5121) {
+ out_8(&regs->cantier, 0);
+ out_8(&regs->canrier, 0);
+ setbits8(&regs->canctl0,
+ MSCAN_SLPRQ | MSCAN_INITRQ);
+ }
can_bus_off(dev);
break;
default:
@@ -491,9 +521,7 @@ static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
switch (mode) {
case CAN_MODE_START:
- if (priv->can.state <= CAN_STATE_BUS_OFF)
- mscan_set_mode(dev, MSCAN_INIT_MODE);
- ret = mscan_start(dev);
+ ret = mscan_restart(dev);
if (ret)
break;
if (netif_queue_stopped(dev))
@@ -592,18 +620,21 @@ static const struct net_device_ops mscan_netdev_ops = {
.ndo_start_xmit = mscan_start_xmit,
};
-int register_mscandev(struct net_device *dev, int clock_src)
+int register_mscandev(struct net_device *dev, int mscan_clksrc)
{
struct mscan_priv *priv = netdev_priv(dev);
struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
u8 ctl1;
ctl1 = in_8(&regs->canctl1);
- if (clock_src)
+ if (mscan_clksrc)
ctl1 |= MSCAN_CLKSRC;
else
ctl1 &= ~MSCAN_CLKSRC;
+ if (priv->type == MSCAN_TYPE_MPC5121)
+ ctl1 |= MSCAN_BORM; /* bus-off recovery upon request */
+
ctl1 |= MSCAN_CANE;
out_8(&regs->canctl1, ctl1);
udelay(100);
@@ -655,6 +686,7 @@ struct net_device *alloc_mscandev(void)
priv->can.bittiming_const = &mscan_bittiming_const;
priv->can.do_set_bittiming = mscan_do_set_bittiming;
priv->can.do_set_mode = mscan_do_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
for (i = 0; i < TX_QUEUE_SIZE; i++) {
priv->tx_queue[i].id = i;
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index 00fc4aaf1ed8..4ff966473bc9 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -38,18 +38,20 @@
#define MSCAN_CLKSRC 0x40
#define MSCAN_LOOPB 0x20
#define MSCAN_LISTEN 0x10
+#define MSCAN_BORM 0x08
#define MSCAN_WUPM 0x04
#define MSCAN_SLPAK 0x02
#define MSCAN_INITAK 0x01
-/* Use the MPC5200 MSCAN variant? */
+/* Use the MPC5XXX MSCAN variant? */
#ifdef CONFIG_PPC
-#define MSCAN_FOR_MPC5200
+#define MSCAN_FOR_MPC5XXX
#endif
-#ifdef MSCAN_FOR_MPC5200
+#ifdef MSCAN_FOR_MPC5XXX
#define MSCAN_CLKSRC_BUS 0
#define MSCAN_CLKSRC_XTAL MSCAN_CLKSRC
+#define MSCAN_CLKSRC_IPS MSCAN_CLKSRC
#else
#define MSCAN_CLKSRC_BUS MSCAN_CLKSRC
#define MSCAN_CLKSRC_XTAL 0
@@ -136,7 +138,7 @@
#define MSCAN_EFF_RTR_SHIFT 0
#define MSCAN_EFF_FLAGS 0x18 /* IDE + SRR */
-#ifdef MSCAN_FOR_MPC5200
+#ifdef MSCAN_FOR_MPC5XXX
#define _MSCAN_RESERVED_(n, num) u8 _res##n[num]
#define _MSCAN_RESERVED_DSR_SIZE 2
#else
@@ -165,67 +167,66 @@ struct mscan_regs {
u8 cantbsel; /* + 0x14 0x0a */
u8 canidac; /* + 0x15 0x0b */
u8 reserved; /* + 0x16 0x0c */
- _MSCAN_RESERVED_(6, 5); /* + 0x17 */
-#ifndef MSCAN_FOR_MPC5200
- u8 canmisc; /* 0x0d */
-#endif
+ _MSCAN_RESERVED_(6, 2); /* + 0x17 */
+ u8 canmisc; /* + 0x19 0x0d */
+ _MSCAN_RESERVED_(7, 2); /* + 0x1a */
u8 canrxerr; /* + 0x1c 0x0e */
u8 cantxerr; /* + 0x1d 0x0f */
- _MSCAN_RESERVED_(7, 2); /* + 0x1e */
+ _MSCAN_RESERVED_(8, 2); /* + 0x1e */
u16 canidar1_0; /* + 0x20 0x10 */
- _MSCAN_RESERVED_(8, 2); /* + 0x22 */
+ _MSCAN_RESERVED_(9, 2); /* + 0x22 */
u16 canidar3_2; /* + 0x24 0x12 */
- _MSCAN_RESERVED_(9, 2); /* + 0x26 */
+ _MSCAN_RESERVED_(10, 2); /* + 0x26 */
u16 canidmr1_0; /* + 0x28 0x14 */
- _MSCAN_RESERVED_(10, 2); /* + 0x2a */
+ _MSCAN_RESERVED_(11, 2); /* + 0x2a */
u16 canidmr3_2; /* + 0x2c 0x16 */
- _MSCAN_RESERVED_(11, 2); /* + 0x2e */
+ _MSCAN_RESERVED_(12, 2); /* + 0x2e */
u16 canidar5_4; /* + 0x30 0x18 */
- _MSCAN_RESERVED_(12, 2); /* + 0x32 */
+ _MSCAN_RESERVED_(13, 2); /* + 0x32 */
u16 canidar7_6; /* + 0x34 0x1a */
- _MSCAN_RESERVED_(13, 2); /* + 0x36 */
+ _MSCAN_RESERVED_(14, 2); /* + 0x36 */
u16 canidmr5_4; /* + 0x38 0x1c */
- _MSCAN_RESERVED_(14, 2); /* + 0x3a */
+ _MSCAN_RESERVED_(15, 2); /* + 0x3a */
u16 canidmr7_6; /* + 0x3c 0x1e */
- _MSCAN_RESERVED_(15, 2); /* + 0x3e */
+ _MSCAN_RESERVED_(16, 2); /* + 0x3e */
struct {
u16 idr1_0; /* + 0x40 0x20 */
- _MSCAN_RESERVED_(16, 2); /* + 0x42 */
+ _MSCAN_RESERVED_(17, 2); /* + 0x42 */
u16 idr3_2; /* + 0x44 0x22 */
- _MSCAN_RESERVED_(17, 2); /* + 0x46 */
+ _MSCAN_RESERVED_(18, 2); /* + 0x46 */
u16 dsr1_0; /* + 0x48 0x24 */
- _MSCAN_RESERVED_(18, 2); /* + 0x4a */
+ _MSCAN_RESERVED_(19, 2); /* + 0x4a */
u16 dsr3_2; /* + 0x4c 0x26 */
- _MSCAN_RESERVED_(19, 2); /* + 0x4e */
+ _MSCAN_RESERVED_(20, 2); /* + 0x4e */
u16 dsr5_4; /* + 0x50 0x28 */
- _MSCAN_RESERVED_(20, 2); /* + 0x52 */
+ _MSCAN_RESERVED_(21, 2); /* + 0x52 */
u16 dsr7_6; /* + 0x54 0x2a */
- _MSCAN_RESERVED_(21, 2); /* + 0x56 */
+ _MSCAN_RESERVED_(22, 2); /* + 0x56 */
u8 dlr; /* + 0x58 0x2c */
- u8:8; /* + 0x59 0x2d */
- _MSCAN_RESERVED_(22, 2); /* + 0x5a */
+ u8 reserved; /* + 0x59 0x2d */
+ _MSCAN_RESERVED_(23, 2); /* + 0x5a */
u16 time; /* + 0x5c 0x2e */
} rx;
- _MSCAN_RESERVED_(23, 2); /* + 0x5e */
+ _MSCAN_RESERVED_(24, 2); /* + 0x5e */
struct {
u16 idr1_0; /* + 0x60 0x30 */
- _MSCAN_RESERVED_(24, 2); /* + 0x62 */
+ _MSCAN_RESERVED_(25, 2); /* + 0x62 */
u16 idr3_2; /* + 0x64 0x32 */
- _MSCAN_RESERVED_(25, 2); /* + 0x66 */
+ _MSCAN_RESERVED_(26, 2); /* + 0x66 */
u16 dsr1_0; /* + 0x68 0x34 */
- _MSCAN_RESERVED_(26, 2); /* + 0x6a */
+ _MSCAN_RESERVED_(27, 2); /* + 0x6a */
u16 dsr3_2; /* + 0x6c 0x36 */
- _MSCAN_RESERVED_(27, 2); /* + 0x6e */
+ _MSCAN_RESERVED_(28, 2); /* + 0x6e */
u16 dsr5_4; /* + 0x70 0x38 */
- _MSCAN_RESERVED_(28, 2); /* + 0x72 */
+ _MSCAN_RESERVED_(29, 2); /* + 0x72 */
u16 dsr7_6; /* + 0x74 0x3a */
- _MSCAN_RESERVED_(29, 2); /* + 0x76 */
+ _MSCAN_RESERVED_(30, 2); /* + 0x76 */
u8 dlr; /* + 0x78 0x3c */
u8 tbpr; /* + 0x79 0x3d */
- _MSCAN_RESERVED_(30, 2); /* + 0x7a */
+ _MSCAN_RESERVED_(31, 2); /* + 0x7a */
u16 time; /* + 0x7c 0x3e */
} tx;
- _MSCAN_RESERVED_(31, 2); /* + 0x7e */
+ _MSCAN_RESERVED_(32, 2); /* + 0x7e */
} __attribute__ ((packed));
#undef _MSCAN_RESERVED_
@@ -237,6 +238,15 @@ struct mscan_regs {
#define MSCAN_POWEROFF_MODE (MSCAN_CSWAI | MSCAN_SLPRQ)
#define MSCAN_SET_MODE_RETRIES 255
#define MSCAN_ECHO_SKB_MAX 3
+#define MSCAN_RX_INTS_ENABLE (MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE | \
+ MSCAN_RSTATE1 | MSCAN_RSTATE0 | \
+ MSCAN_TSTATE1 | MSCAN_TSTATE0)
+
+/* MSCAN type variants */
+enum {
+ MSCAN_TYPE_MPC5200,
+ MSCAN_TYPE_MPC5121
+};
#define BTR0_BRP_MASK 0x3f
#define BTR0_SJW_SHIFT 6
@@ -270,6 +280,7 @@ struct tx_queue_entry {
struct mscan_priv {
struct can_priv can; /* must be the first member */
+ unsigned int type; /* MSCAN type variants */
long open_time;
unsigned long flags;
void __iomem *reg_base; /* ioremap'ed address to registers */
@@ -285,12 +296,7 @@ struct mscan_priv {
};
extern struct net_device *alloc_mscandev(void);
-/*
- * clock_src:
- * 1 = The MSCAN clock source is the onchip Bus Clock.
- * 0 = The MSCAN clock source is the chip Oscillator Clock.
- */
-extern int register_mscandev(struct net_device *dev, int clock_src);
+extern int register_mscandev(struct net_device *dev, int mscan_clksrc);
extern void unregister_mscandev(struct net_device *dev);
#endif /* __MSCAN_H__ */
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 4c674927f247..9e277d64a318 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -44,4 +44,16 @@ config CAN_KVASER_PCI
	  This driver is for the PCIcanx and PCIcan cards (1, 2 or
4 channel) from Kvaser (http://www.kvaser.com).
+config CAN_PLX_PCI
+ tristate "PLX90xx PCI-bridge based Cards"
+ depends on PCI
+ ---help---
+ This driver is for CAN interface cards based on
+ the PLX90xx PCI bridge.
+	  The driver currently supports:
+ - Adlink PCI-7841/cPCI-7841 card (http://www.adlinktech.com/)
+ - Adlink PCI-7841/cPCI-7841 SE card
+ - Marathon CAN-bus-PCI card (http://www.marathon.ru/)
+ - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
+
endif
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index 9d245ac03965..ce924553995d 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
+obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index fd04789d3370..87300606abb9 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -102,7 +102,7 @@ struct ems_pci_card {
#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */
-static struct pci_device_id ems_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ems_pci_tbl) = {
/* CPC-PCI v1 */
{PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,},
/* CPC-PCI v2 */
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 7dd7769b9713..441e776a7f59 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -109,7 +109,7 @@ struct kvaser_pci {
#define KVASER_PCI_VENDOR_ID2 0x1a07 /* the PCI device and vendor IDs */
#define KVASER_PCI_DEVICE_ID2 0x0008
-static struct pci_device_id kvaser_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(kvaser_pci_tbl) = {
{KVASER_PCI_VENDOR_ID1, KVASER_PCI_DEVICE_ID1, PCI_ANY_ID, PCI_ANY_ID,},
{KVASER_PCI_VENDOR_ID2, KVASER_PCI_DEVICE_ID2, PCI_ANY_ID, PCI_ANY_ID,},
{ 0,}
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
new file mode 100644
index 000000000000..6b46a6395f80
--- /dev/null
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -0,0 +1,472 @@
+/*
+ * Copyright (C) 2008-2010 Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>
+ *
+ * Derived from the ems_pci.c driver:
+ * Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com>
+ * Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/io.h>
+
+#include "sja1000.h"
+
+#define DRV_NAME "sja1000_plx_pci"
+
+MODULE_AUTHOR("Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>");
+MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with "
+ "the SJA1000 chips");
+MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
+ "Adlink PCI-7841/cPCI-7841 SE, "
+ "Marathon CAN-bus-PCI, "
+ "TEWS TECHNOLOGIES TPMC810");
+MODULE_LICENSE("GPL v2");
+
+#define PLX_PCI_MAX_CHAN 2
+
+struct plx_pci_card {
+ int channels; /* detected channels count */
+ struct net_device *net_dev[PLX_PCI_MAX_CHAN];
+ void __iomem *conf_addr;
+};
+
+#define PLX_PCI_CAN_CLOCK (16000000 / 2)
+
+/* PLX90xx registers */
+#define PLX_INTCSR 0x4c /* Interrupt Control/Status */
+#define PLX_CNTRL 0x50 /* User I/O, Direct Slave Response,
+ * Serial EEPROM, and Initialization
+ * Control register
+ */
+
+#define PLX_LINT1_EN 0x1 /* Local interrupt 1 enable */
+#define PLX_LINT2_EN (1 << 3) /* Local interrupt 2 enable */
+#define PLX_PCI_INT_EN (1 << 6) /* PCI Interrupt Enable */
+#define PLX_PCI_RESET (1 << 30) /* PCI Adapter Software Reset */
+
+/*
+ * The board configuration is probably as follows:
+ * RX1 is connected to ground.
+ * TX1 is not connected.
+ * CLKO is not connected.
+ * Setting the OCR register to 0xDA is a good idea.
+ * This means normal output mode, push-pull and the correct polarity.
+ */
+#define PLX_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL)
+
+/*
+ * In the CDR register, you should set CBP to 1.
+ * You will probably also want to set the clock divider value to 7
+ * (meaning direct oscillator output) because the second SJA1000 chip
+ * is driven by the first chip's CLKOUT output.
+ */
+#define PLX_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
+
+/* SJA1000 Control Register in the BasicCAN Mode */
+#define REG_CR 0x00
+
+/* States of some SJA1000 registers after hardware reset in the BasicCAN mode */
+#define REG_CR_BASICCAN_INITIAL 0x21
+#define REG_CR_BASICCAN_INITIAL_MASK 0xa1
+#define REG_SR_BASICCAN_INITIAL 0x0c
+#define REG_IR_BASICCAN_INITIAL 0xe0
+
+/* States of some SJA1000 registers after hardware reset in the PeliCAN mode */
+#define REG_MOD_PELICAN_INITIAL 0x01
+#define REG_SR_PELICAN_INITIAL 0x3c
+#define REG_IR_PELICAN_INITIAL 0x00
+
+#define ADLINK_PCI_VENDOR_ID 0x144A
+#define ADLINK_PCI_DEVICE_ID 0x7841
+
+#define MARATHON_PCI_DEVICE_ID 0x2715
+
+#define TEWS_PCI_VENDOR_ID 0x1498
+#define TEWS_PCI_DEVICE_ID_TMPC810 0x032A
+
+static void plx_pci_reset_common(struct pci_dev *pdev);
+static void plx_pci_reset_marathon(struct pci_dev *pdev);
+
+struct plx_pci_channel_map {
+ u32 bar;
+ u32 offset;
+	u32 size;	/* 0x00 - auto, i.e. length of entire bar */
+};
+
+struct plx_pci_card_info {
+ const char *name;
+ int channel_count;
+ u32 can_clock;
+ u8 ocr; /* output control register */
+ u8 cdr; /* clock divider register */
+
+ /* Parameters for mapping local configuration space */
+ struct plx_pci_channel_map conf_map;
+
+ /* Parameters for mapping the SJA1000 chips */
+ struct plx_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CHAN];
+
+ /* Pointer to device-dependent reset function */
+ void (*reset_func)(struct pci_dev *pdev);
+};
+
+static struct plx_pci_card_info plx_pci_card_info_adlink __devinitdata = {
+ "Adlink PCI-7841/cPCI-7841", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {1, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9052 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_adlink_se __devinitdata = {
+ "Adlink PCI-7841/cPCI-7841 SE", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9052 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = {
+ "Marathon CAN-bus-PCI", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} },
+ &plx_pci_reset_marathon
+ /* based on PLX9052 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_tews __devinitdata = {
+ "TEWS TECHNOLOGIES TPMC810", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x000, 0x80}, {2, 0x100, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9030 */
+};
+
+static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
+ {
+ /* Adlink PCI-7841/cPCI-7841 */
+ ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_NETWORK_OTHER << 8, ~0,
+ (kernel_ulong_t)&plx_pci_card_info_adlink
+ },
+ {
+ /* Adlink PCI-7841/cPCI-7841 SE */
+ ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_OTHER << 8, ~0,
+ (kernel_ulong_t)&plx_pci_card_info_adlink_se
+ },
+ {
+ /* Marathon CAN-bus-PCI card */
+ PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_marathon
+ },
+ {
+ /* TEWS TECHNOLOGIES TPMC810 card */
+ TEWS_PCI_VENDOR_ID, TEWS_PCI_DEVICE_ID_TMPC810,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_tews
+ },
+ { 0,}
+};
+MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
+
+static u8 plx_pci_read_reg(const struct sja1000_priv *priv, int port)
+{
+ return ioread8(priv->reg_base + port);
+}
+
+static void plx_pci_write_reg(const struct sja1000_priv *priv, int port, u8 val)
+{
+ iowrite8(val, priv->reg_base + port);
+}
+
+/*
+ * Check if a CAN controller is present at the specified location
+ * by trying to switch it from the Basic mode into the PeliCAN mode.
+ * Also check states of some registers in reset mode.
+ */
+static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
+{
+ int flag = 0;
+
+ /*
+ * Check registers after hardware reset (the Basic mode)
+ * See states on p. 10 of the Datasheet.
+ */
+ if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
+ REG_CR_BASICCAN_INITIAL &&
+ (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) &&
+ (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL))
+ flag = 1;
+
+	/* Bring the SJA1000 into the PeliCAN mode */
+ priv->write_reg(priv, REG_CDR, CDR_PELICAN);
+
+ /*
+ * Check registers after reset in the PeliCAN mode.
+ * See states on p. 23 of the Datasheet.
+ */
+ if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL &&
+ priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL &&
+ priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL)
+ return flag;
+
+ return 0;
+}
+
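As a quick arithmetic check of the BasicCAN comparison above (a stand-alone sketch using the reset-default values defined earlier in this file, not driver code):

	#include <assert.h>

	int main(void)
	{
		unsigned char cr = 0x21;	/* REG_CR_BASICCAN_INITIAL: CR after hardware reset */

		/* 0xa1 is REG_CR_BASICCAN_INITIAL_MASK; only the masked bits must match */
		assert((cr & 0xa1) == 0x21);
		return 0;
	}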
+/*
+ * PLX90xx software reset
+ * It also asserts LRESET#, which resets devices on the Local Bus (if wired).
+ * For most cards this is enough to reset the SJA1000 chips.
+ */
+static void plx_pci_reset_common(struct pci_dev *pdev)
+{
+ struct plx_pci_card *card = pci_get_drvdata(pdev);
+ u32 cntrl;
+
+ cntrl = ioread32(card->conf_addr + PLX_CNTRL);
+ cntrl |= PLX_PCI_RESET;
+ iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
+ udelay(100);
+ cntrl ^= PLX_PCI_RESET;
+ iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
+}
+
+/* Special reset function for Marathon card */
+static void plx_pci_reset_marathon(struct pci_dev *pdev)
+{
+ void __iomem *reset_addr;
+ int i;
+ int reset_bar[2] = {3, 5};
+
+ plx_pci_reset_common(pdev);
+
+ for (i = 0; i < 2; i++) {
+ reset_addr = pci_iomap(pdev, reset_bar[i], 0);
+ if (!reset_addr) {
+ dev_err(&pdev->dev, "Failed to remap reset "
+ "space %d (BAR%d)\n", i, reset_bar[i]);
+ } else {
+ /* reset the SJA1000 chip */
+ iowrite8(0x1, reset_addr);
+ udelay(100);
+ pci_iounmap(pdev, reset_addr);
+ }
+ }
+}
+
+static void plx_pci_del_card(struct pci_dev *pdev)
+{
+ struct plx_pci_card *card = pci_get_drvdata(pdev);
+ struct net_device *dev;
+ struct sja1000_priv *priv;
+ int i = 0;
+
+ for (i = 0; i < card->channels; i++) {
+ dev = card->net_dev[i];
+ if (!dev)
+ continue;
+
+ dev_info(&pdev->dev, "Removing %s\n", dev->name);
+ unregister_sja1000dev(dev);
+ priv = netdev_priv(dev);
+ if (priv->reg_base)
+ pci_iounmap(pdev, priv->reg_base);
+ free_sja1000dev(dev);
+ }
+
+ plx_pci_reset_common(pdev);
+
+ /*
+ * Disable interrupts from PCI-card (PLX90xx) and disable Local_1,
+ * Local_2 interrupts
+ */
+ iowrite32(0x0, card->conf_addr + PLX_INTCSR);
+
+ if (card->conf_addr)
+ pci_iounmap(pdev, card->conf_addr);
+
+ kfree(card);
+
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+/*
+ * Probe PLX90xx based device for the SJA1000 chips and register each
+ * available CAN channel to SJA1000 Socket-CAN subsystem.
+ */
+static int __devinit plx_pci_add_card(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct sja1000_priv *priv;
+ struct net_device *dev;
+ struct plx_pci_card *card;
+ struct plx_pci_card_info *ci;
+ int err, i;
+ u32 val;
+ void __iomem *addr;
+
+ ci = (struct plx_pci_card_info *)ent->driver_data;
+
+ if (pci_enable_device(pdev) < 0) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return -ENODEV;
+ }
+
+ dev_info(&pdev->dev, "Detected \"%s\" card at slot #%i\n",
+ ci->name, PCI_SLOT(pdev->devfn));
+
+ /* Allocate card structures to hold addresses, ... */
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
+ if (!card) {
+ dev_err(&pdev->dev, "Unable to allocate memory\n");
+ pci_disable_device(pdev);
+ return -ENOMEM;
+ }
+
+ pci_set_drvdata(pdev, card);
+
+ card->channels = 0;
+
+ /* Remap PLX90xx configuration space */
+ addr = pci_iomap(pdev, ci->conf_map.bar, ci->conf_map.size);
+ if (!addr) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to remap configuration space "
+ "(BAR%d)\n", ci->conf_map.bar);
+ goto failure_cleanup;
+ }
+ card->conf_addr = addr + ci->conf_map.offset;
+
+ ci->reset_func(pdev);
+
+ /* Detect available channels */
+ for (i = 0; i < ci->channel_count; i++) {
+ struct plx_pci_channel_map *cm = &ci->chan_map_tbl[i];
+
+ dev = alloc_sja1000dev(0);
+ if (!dev) {
+ err = -ENOMEM;
+ goto failure_cleanup;
+ }
+
+ card->net_dev[i] = dev;
+ priv = netdev_priv(dev);
+ priv->priv = card;
+ priv->irq_flags = IRQF_SHARED;
+
+ dev->irq = pdev->irq;
+
+ /*
+ * Remap IO space of the SJA1000 chips
+	 * This is a device-dependent mapping
+ */
+ addr = pci_iomap(pdev, cm->bar, cm->size);
+ if (!addr) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to remap BAR%d\n", cm->bar);
+ goto failure_cleanup;
+ }
+
+ priv->reg_base = addr + cm->offset;
+ priv->read_reg = plx_pci_read_reg;
+ priv->write_reg = plx_pci_write_reg;
+
+ /* Check if channel is present */
+ if (plx_pci_check_sja1000(priv)) {
+ priv->can.clock.freq = ci->can_clock;
+ priv->ocr = ci->ocr;
+ priv->cdr = ci->cdr;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* Register SJA1000 device */
+ err = register_sja1000dev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Registering device failed "
+ "(err=%d)\n", err);
+ free_sja1000dev(dev);
+ goto failure_cleanup;
+ }
+
+ card->channels++;
+
+ dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d "
+ "registered as %s\n", i + 1, priv->reg_base,
+ dev->irq, dev->name);
+ } else {
+ dev_err(&pdev->dev, "Channel #%d not detected\n",
+ i + 1);
+ free_sja1000dev(dev);
+ }
+ }
+
+ if (!card->channels) {
+ err = -ENODEV;
+ goto failure_cleanup;
+ }
+
+ /*
+ * Enable interrupts from PCI-card (PLX90xx) and enable Local_1,
+ * Local_2 interrupts from the SJA1000 chips
+ */
+ val = ioread32(card->conf_addr + PLX_INTCSR);
+ val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
+ iowrite32(val, card->conf_addr + PLX_INTCSR);
+
+ return 0;
+
+failure_cleanup:
+ dev_err(&pdev->dev, "Error: %d. Cleaning Up.\n", err);
+
+ plx_pci_del_card(pdev);
+
+ return err;
+}
+
+static struct pci_driver plx_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = plx_pci_tbl,
+ .probe = plx_pci_add_card,
+ .remove = plx_pci_del_card,
+};
+
+static int __init plx_pci_init(void)
+{
+ return pci_register_driver(&plx_pci_driver);
+}
+
+static void __exit plx_pci_exit(void)
+{
+ pci_unregister_driver(&plx_pci_driver);
+}
+
+module_init(plx_pci_init);
+module_exit(plx_pci_exit);
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 542a4f7255b4..ace103a44833 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -249,6 +249,9 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
uint8_t dreg;
int i;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
netif_stop_queue(dev);
fi = dlc = cf->can_dlc;
@@ -564,6 +567,7 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
priv->can.bittiming_const = &sja1000_bittiming_const;
priv->can.do_set_bittiming = sja1000_set_bittiming;
priv->can.do_set_mode = sja1000_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
if (sizeof_priv)
priv->priv = (void *)priv + sizeof(struct sja1000_priv);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 5c993c2da528..8332e242b0be 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -477,6 +477,9 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 mbxno, mbx_mask, data;
unsigned long flags;
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
mbxno = get_tx_head_mb(priv);
mbx_mask = BIT(mbxno);
spin_lock_irqsave(&priv->mbx_lock, flags);
@@ -491,7 +494,6 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_unlock_irqrestore(&priv->mbx_lock, flags);
/* Prepare mailbox for transmission */
- data = min_t(u8, cf->can_dlc, 8);
if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
data |= HECC_CANMCF_RTR;
data |= get_tx_head_prio(priv) << 8;
@@ -907,6 +909,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv->can.bittiming_const = &ti_hecc_bittiming_const;
priv->can.do_set_mode = ti_hecc_do_set_mode;
priv->can.do_get_state = ti_hecc_get_state;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
ndev->irq = irq->start;
ndev->flags |= IFF_ECHO;
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index bbc78e0b8a15..97ff6febad63 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -5,6 +5,6 @@ config CAN_EMS_USB
tristate "EMS CPC-USB/ARM7 CAN/USB interface"
---help---
This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
- from from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
+ from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
endmenu
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index efbb05c71bf4..11c87840cc00 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -767,6 +767,9 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN
+ sizeof(struct cpc_can_msg);
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
/* create a URB, and a buffer for it, and copy the data to the URB */
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
@@ -1019,8 +1022,7 @@ static int ems_usb_probe(struct usb_interface *intf,
dev->can.bittiming_const = &ems_usb_bittiming_const;
dev->can.do_set_bittiming = ems_usb_set_bittiming;
dev->can.do_set_mode = ems_usb_set_mode;
-
- netdev->flags |= IFF_ECHO; /* we support local echo */
+ dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
netdev->netdev_ops = &ems_usb_netdev_ops;
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 80ac56313981..d124d837ae58 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -47,6 +47,7 @@
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/can.h>
+#include <linux/can/dev.h>
#include <net/rtnetlink.h>
static __initdata const char banner[] =
@@ -70,10 +71,11 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
{
+ struct can_frame *cf = (struct can_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ stats->rx_bytes += cf->can_dlc;
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
@@ -85,11 +87,15 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
+ struct can_frame *cf = (struct can_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
int loop;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
stats->tx_packets++;
- stats->tx_bytes += skb->len;
+ stats->tx_bytes += cf->can_dlc;
/* set flag whether this packet has to be looped back */
loop = skb->pkt_type == PACKET_LOOPBACK;
@@ -103,7 +109,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
* CAN core already did the echo for us
*/
stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ stats->rx_bytes += cf->can_dlc;
}
kfree_skb(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index f857afe8e488..ad47e5126fde 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -106,7 +106,7 @@
#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define CAS_NCPUS num_online_cpus()
-#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL)
+#ifdef CONFIG_CASSINI_NAPI
#define USE_NAPI
#define cas_skb_release(x) netif_receive_skb(x)
#else
@@ -236,7 +236,7 @@ static u16 link_modes[] __devinitdata = {
CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};
-static struct pci_device_id cas_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 699d22c5fe09..bb159d9603bf 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -97,13 +97,13 @@ struct t1_rx_mode {
#define t1_rx_mode_promisc(rm) (rm->dev->flags & IFF_PROMISC)
#define t1_rx_mode_allmulti(rm) (rm->dev->flags & IFF_ALLMULTI)
-#define t1_rx_mode_mc_cnt(rm) (rm->dev->mc_count)
+#define t1_rx_mode_mc_cnt(rm) (netdev_mc_count(rm->dev))
static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm)
{
u8 *addr = NULL;
- if (rm->idx++ < rm->dev->mc_count) {
+ if (rm->idx++ < t1_rx_mode_mc_cnt(rm)) {
addr = rm->list->dmi_addr;
rm->list = rm->list->next;
}
@@ -334,7 +334,7 @@ static inline int t1_is_asic(const adapter_t *adapter)
return adapter->params.is_asic;
}
-extern struct pci_device_id t1_pci_tbl[];
+extern const struct pci_device_id t1_pci_tbl[];
static inline int adapter_matches_type(const adapter_t *adapter,
int version, int revision)
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 17720c6e5bfe..2402d372c886 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -528,7 +528,7 @@ static const struct board_info t1_board[] = {
};
-struct pci_device_id t1_pci_tbl[] = {
+DEFINE_PCI_DEVICE_TABLE(t1_pci_tbl) = {
CH_DEVICE(8, 0, CH_BRD_T110_1CU),
CH_DEVICE(8, 1, CH_BRD_T110_1CU),
CH_DEVICE(7, 0, CH_BRD_N110_1F),
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 8d0be26f94e3..c9c537be4ab9 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -340,7 +340,7 @@ static void cpmac_set_multicast_list(struct net_device *dev)
* cpmac uses some strange mac address hashing
* (not crc32)
*/
- for (i = 0, iter = dev->mc_list; i < dev->mc_count;
+ for (i = 0, iter = dev->mc_list; i < netdev_mc_count(dev);
i++, iter = iter->next) {
bit = 0;
tmp = iter->dmi_addr[0];
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index a24be34a3f7a..c9309eadebc1 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1564,7 +1564,7 @@ static void
set_multicast_list(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
- int num_addr = dev->mc_count;
+ int num_addr = netdev_mc_count(dev);
unsigned long int lo_bits;
unsigned long int hi_bits;
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 89bec9c3c141..73622f5312cb 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -80,7 +80,7 @@ enum {
#define CH_DEVICE(devid, idx) \
{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
-static const struct pci_device_id cxgb3_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
CH_DEVICE(0x20, 0), /* PE9000 */
CH_DEVICE(0x21, 1), /* T302E */
CH_DEVICE(0x22, 2), /* T310E */
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 75064eea1d87..9498361119d6 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1252,7 +1252,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
struct mtutab mtutab;
unsigned int l2t_capacity;
- t = kcalloc(1, sizeof(*t), GFP_KERNEL);
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
if (!t)
return -ENOMEM;
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 318a018ca7c5..048205903741 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -480,6 +480,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
if (q->pend_cred >= q->credits / 4) {
q->pend_cred = 0;
+ wmb();
t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}
}
@@ -2286,11 +2287,14 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
while (likely(budget_left && is_new_response(r, q))) {
int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
struct sk_buff *skb = NULL;
- u32 len, flags = ntohl(r->flags);
- __be32 rss_hi = *(const __be32 *)r,
- rss_lo = r->rss_hdr.rss_hash_val;
+ u32 len, flags;
+ __be32 rss_hi, rss_lo;
+ rmb();
eth = r->rss_hdr.opcode == CPL_RX_PKT;
+ rss_hi = *(const __be32 *)r;
+ rss_lo = r->rss_hdr.rss_hash_val;
+ flags = ntohl(r->flags);
if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
@@ -2501,7 +2505,10 @@ static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
refill_rspq(adap, q, q->credits);
q->credits = 0;
}
- } while (is_new_response(r, q) && is_pure_response(r));
+ if (!is_new_response(r, q))
+ break;
+ rmb();
+ } while (is_pure_response(r));
if (sleeping)
check_ring_db(adap, qs, sleeping);
@@ -2535,6 +2542,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
if (!is_new_response(r, q))
return -1;
+ rmb();
if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 032cfe065570..3ab9f51918aa 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1262,7 +1262,8 @@ void t3_link_changed(struct adapter *adapter, int port_id)
lc->fc = fc;
}
- t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
+ t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
+ speed, duplex, fc);
}
void t3_link_fault(struct adapter *adapter, int port_id)
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
index 0109ee4f2f91..0c08de5d09fd 100644
--- a/drivers/net/cxgb3/xgmac.c
+++ b/drivers/net/cxgb3/xgmac.c
@@ -353,6 +353,9 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
* packet size register includes header, but not FCS.
*/
mtu += 14;
+ if (mtu > 1536)
+ mtu += 4;
+
if (mtu > MAX_FRAME_SIZE - 4)
return -EINVAL;
t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 33c4fe26178c..d1e03b5984c0 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -956,11 +956,11 @@ static void emac_dev_mcast_set(struct net_device *ndev)
} else {
mbp_enable = (mbp_enable & ~EMAC_MBP_RXPROMISC);
if ((ndev->flags & IFF_ALLMULTI) ||
- (ndev->mc_count > EMAC_DEF_MAX_MULTICAST_ADDRESSES)) {
+ netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
}
- if (ndev->mc_count > 0) {
+ if (!netdev_mc_empty(ndev)) {
struct dev_mc_list *mc_ptr;
mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
@@ -2672,8 +2672,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
size = res->end - res->start + 1;
if (!request_mem_region(res->start, size, ndev->name)) {
- dev_err(emac_dev, "DaVinci EMAC: failed request_mem_region() \
- for regs\n");
+ dev_err(emac_dev, "DaVinci EMAC: failed request_mem_region() for regs\n");
rc = -ENXIO;
goto probe_quit;
}
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index 45794f6cb0f6..a0a6830b5e6d 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -464,7 +464,7 @@ static int de620_close(struct net_device *dev)
static void de620_set_multicast_list(struct net_device *dev)
{
- if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
{ /* Enable promiscuous mode */
de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
}
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index be9590253aa1..76e0de6a4263 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -960,7 +960,7 @@ static void lance_load_multicast(struct net_device *dev)
*lib_ptr(ib, filter[3], lp->type) = 0;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index 6a6ea038d7a3..5adb1e03956d 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -1052,12 +1052,9 @@ static int __devinit dfx_driver_init(struct net_device *dev,
board_name = "DEFEA";
if (dfx_bus_pci)
board_name = "DEFPA";
- pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, "
- "Hardware addr = %02X-%02X-%02X-%02X-%02X-%02X\n",
+ pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
print_name, board_name, dfx_use_mmio ? "" : "I/O ",
- (long long)bar_start, dev->irq,
- dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
- dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ (long long)bar_start, dev->irq, dev->dev_addr);
/*
* Get memory for descriptor block, consumer block, and other buffers
@@ -2230,7 +2227,7 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
* perfect filtering will be used.
*/
- if (dev->mc_count > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
+ if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
{
bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */
bp->mc_count = 0; /* Don't add mc addrs to CAM */
@@ -2238,7 +2235,7 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
else
{
bp->group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC group prom mode */
- bp->mc_count = dev->mc_count; /* Add mc addrs to CAM */
+ bp->mc_count = netdev_mc_count(dev); /* Add mc addrs to CAM */
}
/* Copy addresses to multicast address table, then update adapter CAM */
@@ -3631,7 +3628,7 @@ static int __devinit dfx_pci_register(struct pci_dev *,
const struct pci_device_id *);
static void __devexit dfx_pci_unregister(struct pci_dev *);
-static struct pci_device_id dfx_pci_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(dfx_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
{ }
};
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 0c1f491d20bf..314bc96689f4 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1287,7 +1287,7 @@ static void SetMulticastFilter(struct net_device *dev)
lp->init_block.mcast_table[i] = 0;
}
/* Add multicast addresses */
- for (i = 0; i < dev->mc_count; i++) { /* for each address in the list */
+ for (i = 0; i < netdev_mc_count(dev); i++) { /* for each address in the list */
addrs = dmi->dmi_addr;
dmi = dmi->next;
if ((*addrs & 0x01) == 1) { /* multicast address? */
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 2a8b6a7c0b87..dea40953ed1b 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1128,17 +1128,17 @@ set_multicast (struct net_device *dev)
/* Receive all frames promiscuously. */
rx_mode = ReceiveAllFrames;
} else if ((dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > multicast_filter_limit)) {
+ (netdev_mc_count(dev) > multicast_filter_limit)) {
/* Receive broadcast and multicast frames */
rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
int i;
struct dev_mc_list *mclist;
/* Receive broadcast frames and multicast frames filtering
by Hashtable */
rx_mode =
ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
- for (i=0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i=0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist=mclist->next)
{
int bit, index = 0;
diff --git a/drivers/net/dl2k.h b/drivers/net/dl2k.h
index 266ec8777ca8..7caab3d26a9e 100644
--- a/drivers/net/dl2k.h
+++ b/drivers/net/dl2k.h
@@ -537,7 +537,7 @@ struct netdev_private {
driver_data Data private to the driver.
*/
-static const struct pci_device_id rio_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rio_pci_tbl) = {
{0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, },
{0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, },
{ }
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index b37730065688..da0985a7a87a 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -725,7 +725,7 @@ dm9000_hash_table(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
struct dev_mc_list *mcptr = dev->mc_list;
- int mc_cnt = dev->mc_count;
+ int mc_cnt = netdev_mc_count(dev);
int i, oft;
u32 hash_val;
u16 hash_table[4];
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 839fb2b136d3..e8c0e823a06f 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -208,7 +208,7 @@ MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
-static struct pci_device_id e100_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
@@ -1538,7 +1538,7 @@ static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct net_device *netdev = nic->netdev;
struct dev_mc_list *list = netdev->mc_list;
- u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
+ u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
cb->command = cpu_to_le16(cb_multi);
cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
@@ -1552,7 +1552,7 @@ static void e100_set_multicast_list(struct net_device *netdev)
struct nic *nic = netdev_priv(netdev);
DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
- netdev->mc_count, netdev->flags);
+ netdev_mc_count(netdev), netdev->flags);
if (netdev->flags & IFF_PROMISC)
nic->flags |= promiscuous;
@@ -1560,7 +1560,7 @@ static void e100_set_multicast_list(struct net_device *netdev)
nic->flags &= ~promiscuous;
if (netdev->flags & IFF_ALLMULTI ||
- netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
+ netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
nic->flags |= multicast_all;
else
nic->flags &= ~multicast_all;
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index e8932db7ee77..9902b33b7160 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -349,6 +349,7 @@ extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
extern void e1000_update_stats(struct e1000_adapter *adapter);
+extern bool e1000_has_link(struct e1000_adapter *adapter);
extern void e1000_power_up_phy(struct e1000_adapter *);
extern void e1000_set_ethtool_ops(struct net_device *netdev);
extern void e1000_check_options(struct e1000_adapter *adapter);
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 13e9ece16889..c67e93117271 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -215,6 +215,23 @@ static int e1000_set_settings(struct net_device *netdev,
return 0;
}
+static u32 e1000_get_link(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ /*
+ * If the link is not reported up to netdev, interrupts are disabled,
+ * and so the physical link state may have changed since we last
+ * looked. Set get_link_status to make sure that the true link
+ * state is interrogated, rather than pulling a cached and possibly
+ * stale link state from the driver.
+ */
+ if (!netif_carrier_ok(netdev))
+ adapter->hw.get_link_status = 1;
+
+ return e1000_has_link(adapter);
+}
+
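Since this callback backs the ETHTOOL_GLINK query, the forced refresh above is what keeps the reported state current while interrupts are disabled; assuming the standard ethtool utility and an e1000 interface named eth0, the result shows up as the "Link detected" line of

	ethtool eth0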
static void e1000_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
@@ -1892,7 +1909,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_msglevel = e1000_get_msglevel,
.set_msglevel = e1000_set_msglevel,
.nway_reset = e1000_nway_reset,
- .get_link = ethtool_op_get_link,
+ .get_link = e1000_get_link,
.get_eeprom_len = e1000_get_eeprom_len,
.get_eeprom = e1000_get_eeprom,
.set_eeprom = e1000_set_eeprom,
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index d29bb532eccf..3b14dd718ab4 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -42,7 +42,7 @@ static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation
* Macro expands to...
* {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
*/
-static struct pci_device_id e1000_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
INTEL_E1000_ETHERNET_DEVICE(0x1000),
INTEL_E1000_ETHERNET_DEVICE(0x1001),
INTEL_E1000_ETHERNET_DEVICE(0x1004),
@@ -847,6 +847,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
goto err_pci_reg;
pci_set_master(pdev);
+ err = pci_save_state(pdev);
+ if (err)
+ goto err_alloc_etherdev;
err = -ENOMEM;
netdev = alloc_etherdev(sizeof(struct e1000_adapter));
@@ -2127,7 +2130,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
rctl |= E1000_RCTL_VFE;
}
- if (netdev->uc.count > rar_entries - 1) {
+ if (netdev_uc_count(netdev) > rar_entries - 1) {
rctl |= E1000_RCTL_UPE;
} else if (!(netdev->flags & IFF_PROMISC)) {
rctl &= ~E1000_RCTL_UPE;
@@ -2150,7 +2153,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
*/
i = 1;
if (use_uc)
- list_for_each_entry(ha, &netdev->uc.list, list) {
+ netdev_for_each_uc_addr(ha, netdev) {
if (i == rar_entries)
break;
e1000_rar_set(hw, ha->addr, i++);
@@ -2246,7 +2249,7 @@ static void e1000_82547_tx_fifo_stall(unsigned long data)
}
}
-static bool e1000_has_link(struct e1000_adapter *adapter)
+bool e1000_has_link(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
bool link_active = false;
@@ -4596,6 +4599,7 @@ static int e1000_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
+ pci_save_state(pdev);
if (adapter->need_ioport)
err = pci_enable_device(pdev);
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 02d67d047d96..3c95acb3a87d 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -267,8 +267,14 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
}
switch (hw->mac.type) {
+ case e1000_82573:
+ func->set_lan_id = e1000_set_lan_id_single_port;
+ func->check_mng_mode = e1000e_check_mng_mode_generic;
+ func->led_on = e1000e_led_on_generic;
+ break;
case e1000_82574:
case e1000_82583:
+ func->set_lan_id = e1000_set_lan_id_single_port;
func->check_mng_mode = e1000_check_mng_mode_82574;
func->led_on = e1000_led_on_82574;
break;
@@ -922,9 +928,12 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
ew32(IMC, 0xffffffff);
icr = er32(ICR);
- if (hw->mac.type == e1000_82571 &&
- hw->dev_spec.e82571.alt_mac_addr_is_present)
- e1000e_set_laa_state_82571(hw, true);
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ e1000e_set_laa_state_82571(hw, true);
/* Reinitialize the 82571 serdes link state machine */
if (hw->phy.media_type == e1000_media_type_internal_serdes)
@@ -1225,32 +1234,6 @@ static s32 e1000_led_on_82574(struct e1000_hw *hw)
}
/**
- * e1000_update_mc_addr_list_82571 - Update Multicast addresses
- * @hw: pointer to the HW structure
- * @mc_addr_list: array of multicast addresses to program
- * @mc_addr_count: number of multicast addresses to program
- * @rar_used_count: the first RAR register free to program
- * @rar_count: total number of supported Receive Address Registers
- *
- * Updates the Receive Address Registers and Multicast Table Array.
- * The caller must have a packed mc_addr_list of multicast addresses.
- * The parameter rar_count will usually be hw->mac.rar_entry_count
- * unless there are workarounds that change this.
- **/
-static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw,
- u8 *mc_addr_list,
- u32 mc_addr_count,
- u32 rar_used_count,
- u32 rar_count)
-{
- if (e1000e_get_laa_state_82571(hw))
- rar_count--;
-
- e1000e_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count,
- rar_used_count, rar_count);
-}
-
-/**
* e1000_setup_link_82571 - Setup flow control and link settings
* @hw: pointer to the HW structure
*
@@ -1621,6 +1604,29 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
}
/**
+ * e1000_read_mac_addr_82571 - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ /*
+	 * If there's an alternate MAC address, place it in RAR0
+	 * so that it will override the Si-installed default permanent
+	 * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_power_down_phy_copper_82571 - Remove link during PHY power down
* @hw: pointer to the HW structure
*
@@ -1695,10 +1701,11 @@ static struct e1000_mac_operations e82571_mac_ops = {
.cleanup_led = e1000e_cleanup_led_generic,
.clear_hw_cntrs = e1000_clear_hw_cntrs_82571,
.get_bus_info = e1000e_get_bus_info_pcie,
+ .set_lan_id = e1000_set_lan_id_multi_port_pcie,
/* .get_link_up_info: media type dependent */
/* .led_on: mac type dependent */
.led_off = e1000e_led_off_generic,
- .update_mc_addr_list = e1000_update_mc_addr_list_82571,
+ .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
.write_vfta = e1000_write_vfta_generic,
.clear_vfta = e1000_clear_vfta_82571,
.reset_hw = e1000_reset_hw_82571,
@@ -1706,6 +1713,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
.setup_link = e1000_setup_link_82571,
/* .setup_physical_interface: media type dependent */
.setup_led = e1000e_setup_led_generic,
+ .read_mac_addr = e1000_read_mac_addr_82571,
};
static struct e1000_phy_operations e82_phy_ops_igp = {
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index e02e38221ed4..db05ec355749 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -460,6 +460,8 @@
*/
#define E1000_RAR_ENTRIES 15
#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
+#define E1000_RAL_MAC_ADDR_LEN 4
+#define E1000_RAH_MAC_ADDR_LEN 2
/* Error Codes */
#define E1000_ERR_NVM 1
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index d236efaf7478..c2ec095d2163 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -459,7 +459,7 @@ extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_update_stats(struct e1000_adapter *adapter);
-extern bool e1000_has_link(struct e1000_adapter *adapter);
+extern bool e1000e_has_link(struct e1000_adapter *adapter);
extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
@@ -503,6 +503,8 @@ extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
+extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+extern void e1000_set_lan_id_single_port(struct e1000_hw *hw);
extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
@@ -517,9 +519,7 @@ extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
u8 *mc_addr_list,
- u32 mc_addr_count,
- u32 rar_used_count,
- u32 rar_count);
+ u32 mc_addr_count);
extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
@@ -530,6 +530,7 @@ extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
extern s32 e1000e_blink_led(struct e1000_hw *hw);
extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
extern void e1000e_reset_adaptive(struct e1000_hw *hw);
extern void e1000e_update_adaptive(struct e1000_hw *hw);
@@ -629,7 +630,15 @@ extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16
extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
extern void e1000e_release_nvm(struct e1000_hw *hw);
extern void e1000e_reload_nvm(struct e1000_hw *hw);
-extern s32 e1000e_read_mac_addr(struct e1000_hw *hw);
+extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
+
+static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.read_mac_addr)
+ return hw->mac.ops.read_mac_addr(hw);
+
+ return e1000_read_mac_addr_generic(hw);
+}
static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
{
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index e2aa3b788564..27d21589a69a 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -246,6 +246,9 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
break;
}
+ /* set lan id for port to determine which phy lock to use */
+ hw->mac.ops.set_lan_id(hw);
+
return 0;
}
@@ -814,7 +817,9 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
ew32(IMC, 0xffffffff);
icr = er32(ICR);
- return 0;
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+ return ret_val;
}
/**
@@ -1340,6 +1345,29 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
}
/**
+ * e1000_read_mac_addr_80003es2lan - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ /*
+ * If there's an alternate MAC address place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
* @hw: pointer to the HW structure
*
@@ -1403,12 +1431,14 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
}
static struct e1000_mac_operations es2_mac_ops = {
+ .read_mac_addr = e1000_read_mac_addr_80003es2lan,
.id_led_init = e1000e_id_led_init,
.check_mng_mode = e1000e_check_mng_mode_generic,
/* check_for_link dependent on media type */
.cleanup_led = e1000e_cleanup_led_generic,
.clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan,
.get_bus_info = e1000e_get_bus_info_pcie,
+ .set_lan_id = e1000_set_lan_id_multi_port_pcie,
.get_link_up_info = e1000_get_link_up_info_80003es2lan,
.led_on = e1000e_led_on_generic,
.led_off = e1000e_led_off_generic,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 0aa50c229c79..b33e3cbe9ab0 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -202,7 +202,7 @@ static u32 e1000_get_link(struct net_device *netdev)
if (!netif_carrier_ok(netdev))
mac->get_link_status = 1;
- return e1000_has_link(adapter);
+ return e1000e_has_link(adapter);
}
static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index eccf29b75c41..8bdcd5f24eff 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -389,6 +389,9 @@ enum e1e_registers {
#define E1000_FUNC_1 1
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
+
enum e1000_mac_type {
e1000_82571,
e1000_82572,
@@ -746,16 +749,18 @@ struct e1000_mac_operations {
void (*clear_hw_cntrs)(struct e1000_hw *);
void (*clear_vfta)(struct e1000_hw *);
s32 (*get_bus_info)(struct e1000_hw *);
+ void (*set_lan_id)(struct e1000_hw *);
s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
s32 (*led_on)(struct e1000_hw *);
s32 (*led_off)(struct e1000_hw *);
- void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32);
+ void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
s32 (*reset_hw)(struct e1000_hw *);
s32 (*init_hw)(struct e1000_hw *);
s32 (*setup_link)(struct e1000_hw *);
s32 (*setup_physical_interface)(struct e1000_hw *);
s32 (*setup_led)(struct e1000_hw *);
void (*write_vfta)(struct e1000_hw *, u32, u32);
+ s32 (*read_mac_addr)(struct e1000_hw *);
};
/* Function pointers for the PHY. */
@@ -814,6 +819,10 @@ struct e1000_mac_info {
u16 ifs_ratio;
u16 ifs_step_size;
u16 mta_reg_count;
+
+ /* Maximum size of the MTA register table in all supported adapters */
+ #define MAX_MTA_REG 128
+ u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
u8 forced_speed_duplex;
@@ -897,7 +906,6 @@ struct e1000_fc_info {
struct e1000_dev_spec_82571 {
bool laa_is_present;
- bool alt_mac_addr_is_present;
u32 smb_counter;
};
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 8b6ecd127889..54d03a0ce3ce 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -3368,6 +3368,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
/* cleanup_led dependent on mac type */
.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
.get_bus_info = e1000_get_bus_info_ich8lan,
+ .set_lan_id = e1000_set_lan_id_single_port,
.get_link_up_info = e1000_get_link_up_info_ich8lan,
/* led_on dependent on mac type */
/* led_off dependent on mac type */
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 2fa9b36a2c5a..2425ed11d5cc 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -51,10 +51,10 @@ enum e1000_mng_mode {
**/
s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
{
+ struct e1000_mac_info *mac = &hw->mac;
struct e1000_bus_info *bus = &hw->bus;
struct e1000_adapter *adapter = hw->adapter;
- u32 status;
- u16 pcie_link_status, pci_header_type, cap_offset;
+ u16 pcie_link_status, cap_offset;
cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
if (!cap_offset) {
@@ -68,20 +68,46 @@ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
PCIE_LINK_WIDTH_SHIFT);
}
- pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER,
- &pci_header_type);
- if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
- status = er32(STATUS);
- bus->func = (status & E1000_STATUS_FUNC_MASK)
- >> E1000_STATUS_FUNC_SHIFT;
- } else {
- bus->func = 0;
- }
+ mac->ops.set_lan_id(hw);
return 0;
}
/**
+ * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ *
+ * @hw: pointer to the HW structure
+ *
+ * Determines the LAN function id by reading memory-mapped registers
+ * and swaps the port value if requested.
+ **/
+void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+ u32 reg;
+
+ /*
+ * The status register reports the correct function number
+ * for the device regardless of function swap state.
+ */
+ reg = er32(STATUS);
+ bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+}
+
+/**
+ * e1000_set_lan_id_single_port - Set LAN id for a single port device
+ * @hw: pointer to the HW structure
+ *
+ * Sets the LAN function id to zero for a single port device.
+ **/
+void e1000_set_lan_id_single_port(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+
+ bus->func = 0;
+}
+
+/**
* e1000_clear_vfta_generic - Clear VLAN filter table
* @hw: pointer to the HW structure
*
@@ -139,6 +165,68 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
}
/**
+ * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
+ * @hw: pointer to the HW structure
+ *
+ * Checks the nvm for an alternate MAC address. An alternate MAC address
+ * can be set up by pre-boot software and must be treated like a permanent
+ * address and must override the actual permanent MAC address. If an
+ * alternate MAC address is found it is programmed into RAR0, replacing
+ * the permanent address that was installed into RAR0 by the Si on reset.
+ * This function will return SUCCESS unless it encounters an error while
+ * reading the EEPROM.
+ **/
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
+{
+ u32 i;
+ s32 ret_val = 0;
+ u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+ u8 alt_mac_addr[ETH_ALEN];
+
+ ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+ &nvm_alt_mac_addr_offset);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ if (nvm_alt_mac_addr_offset == 0xFFFF) {
+ /* There is no Alternate MAC Address */
+ goto out;
+ }
+
+ if (hw->bus.func == E1000_FUNC_1)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+ for (i = 0; i < ETH_ALEN; i += 2) {
+ offset = nvm_alt_mac_addr_offset + (i >> 1);
+ ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+ alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+ }
+
+ /* if multicast bit is set, the alternate address will not be used */
+ if (alt_mac_addr[0] & 0x01) {
+ e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
+ goto out;
+ }
+
+ /*
+ * We have a valid alternate MAC address, and we want to treat it the
+ * same as the normal permanent MAC address stored by the HW into the
+ * RAR. Do this by mapping this address into RAR0.
+ */
+ e1000e_rar_set(hw, alt_mac_addr, 0);
+
+out:
+ return ret_val;
+}
+
+/**
* e1000e_rar_set - Set receive address register
* @hw: pointer to the HW structure
* @addr: pointer to the receive address
@@ -252,62 +340,34 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
- * @rar_used_count: the first RAR register free to program
- * @rar_count: total number of supported Receive Address Registers
*
- * Updates the Receive Address Registers and Multicast Table Array.
+ * Updates entire Multicast Table Array.
* The caller must have a packed mc_addr_list of multicast addresses.
- * The parameter rar_count will usually be hw->mac.rar_entry_count
- * unless there are workarounds that change this.
**/
void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count,
- u32 rar_used_count, u32 rar_count)
+ u8 *mc_addr_list, u32 mc_addr_count)
{
- u32 i;
- u32 *mcarray = kzalloc(hw->mac.mta_reg_count * sizeof(u32), GFP_ATOMIC);
+ u32 hash_value, hash_bit, hash_reg;
+ int i;
- if (!mcarray) {
- printk(KERN_ERR "multicast array memory allocation failed\n");
- return;
- }
+ /* clear mta_shadow */
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
- /*
- * Load the first set of multicast addresses into the exact
- * filters (RAR). If there are not enough to fill the RAR
- * array, clear the filters.
- */
- for (i = rar_used_count; i < rar_count; i++) {
- if (mc_addr_count) {
- e1000e_rar_set(hw, mc_addr_list, i);
- mc_addr_count--;
- mc_addr_list += ETH_ALEN;
- } else {
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
- e1e_flush();
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
- e1e_flush();
- }
- }
-
- /* Load any remaining multicast addresses into the hash table. */
- for (; mc_addr_count > 0; mc_addr_count--) {
- u32 hash_value, hash_reg, hash_bit, mta;
+ /* update mta_shadow from mc_addr_list */
+ for (i = 0; (u32) i < mc_addr_count; i++) {
hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
- e_dbg("Hash value = 0x%03X\n", hash_value);
+
hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
hash_bit = hash_value & 0x1F;
- mta = (1 << hash_bit);
- mcarray[hash_reg] |= mta;
- mc_addr_list += ETH_ALEN;
- }
- /* write the hash table completely */
- for (i = 0; i < hw->mac.mta_reg_count; i++)
- E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, mcarray[i]);
+ hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ mc_addr_list += (ETH_ALEN);
+ }
+ /* replace the entire MTA table */
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
e1e_flush();
- kfree(mcarray);
}
/**
@@ -2072,67 +2132,27 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
}
/**
- * e1000e_read_mac_addr - Read device MAC address
+ * e1000_read_mac_addr_generic - Read device MAC address
* @hw: pointer to the HW structure
*
* Reads the device MAC address from the EEPROM and stores the value.
* Since devices with two ports use the same EEPROM, we increment the
* last bit in the MAC address for the second port.
**/
-s32 e1000e_read_mac_addr(struct e1000_hw *hw)
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
{
- s32 ret_val;
- u16 offset, nvm_data, i;
- u16 mac_addr_offset = 0;
-
- if (hw->mac.type == e1000_82571) {
- /* Check for an alternate MAC address. An alternate MAC
- * address can be setup by pre-boot software and must be
- * treated like a permanent address and must override the
- * actual permanent MAC address.*/
- ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
- &mac_addr_offset);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- return ret_val;
- }
- if (mac_addr_offset == 0xFFFF)
- mac_addr_offset = 0;
-
- if (mac_addr_offset) {
- if (hw->bus.func == E1000_FUNC_1)
- mac_addr_offset += ETH_ALEN/sizeof(u16);
-
- /* make sure we have a valid mac address here
- * before using it */
- ret_val = e1000_read_nvm(hw, mac_addr_offset, 1,
- &nvm_data);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- return ret_val;
- }
- if (nvm_data & 0x0001)
- mac_addr_offset = 0;
- }
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
- if (mac_addr_offset)
- hw->dev_spec.e82571.alt_mac_addr_is_present = 1;
- }
+ rar_high = er32(RAH(0));
+ rar_low = er32(RAL(0));
- for (i = 0; i < ETH_ALEN; i += 2) {
- offset = mac_addr_offset + (i >> 1);
- ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- return ret_val;
- }
- hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
- hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
- }
+ for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
- /* Flip last bit of mac address if we're on second port */
- if (!mac_addr_offset && hw->bus.func == E1000_FUNC_1)
- hw->mac.perm_addr[5] ^= 1;
+ for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
for (i = 0; i < ETH_ALEN; i++)
hw->mac.addr[i] = hw->mac.perm_addr[i];
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 57f149b75fbe..88d54d3efcef 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2541,22 +2541,14 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
- * @rar_used_count: the first RAR register free to program
- * @rar_count: total number of supported Receive Address Registers
*
- * Updates the Receive Address Registers and Multicast Table Array.
+ * Updates the Multicast Table Array.
* The caller must have a packed mc_addr_list of multicast addresses.
- * The parameter rar_count will usually be hw->mac.rar_entry_count
- * unless there are workarounds that change this. Currently no func pointer
- * exists and all implementations are handled in the generic version of this
- * function.
**/
static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count, u32 rar_used_count,
- u32 rar_count)
+ u32 mc_addr_count)
{
- hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
- rar_used_count, rar_count);
+ hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
}
/**
@@ -2572,7 +2564,6 @@ static void e1000_set_multi(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct e1000_mac_info *mac = &hw->mac;
struct dev_mc_list *mc_ptr;
u8 *mta_list;
u32 rctl;
@@ -2598,31 +2589,25 @@ static void e1000_set_multi(struct net_device *netdev)
ew32(RCTL, rctl);
- if (netdev->mc_count) {
- mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC);
+ if (!netdev_mc_empty(netdev)) {
+ mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
if (!mta_list)
return;
/* prepare a packed array of only addresses. */
- mc_ptr = netdev->mc_list;
-
- for (i = 0; i < netdev->mc_count; i++) {
- if (!mc_ptr)
- break;
- memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
- ETH_ALEN);
- mc_ptr = mc_ptr->next;
- }
+ i = 0;
+ netdev_for_each_mc_addr(mc_ptr, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN),
+ mc_ptr->dmi_addr, ETH_ALEN);
- e1000_update_mc_addr_list(hw, mta_list, i, 1,
- mac->rar_entry_count);
+ e1000_update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
} else {
/*
* if we're called from probe, we might not have
* anything to do here, so clear out the list
*/
- e1000_update_mc_addr_list(hw, NULL, 0, 1, mac->rar_entry_count);
+ e1000_update_mc_addr_list(hw, NULL, 0);
}
}
@@ -3482,7 +3467,7 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
}
-bool e1000_has_link(struct e1000_adapter *adapter)
+bool e1000e_has_link(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
bool link_active = 0;
@@ -3563,7 +3548,7 @@ static void e1000_watchdog_task(struct work_struct *work)
u32 link, tctl;
int tx_pending = 0;
- link = e1000_has_link(adapter);
+ link = e1000e_has_link(adapter);
if ((netif_carrier_ok(netdev)) && link) {
e1000e_enable_receives(adapter);
goto link_up;
@@ -5134,7 +5119,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
e1000_eeprom_checks(adapter);
- /* copy the MAC address out of the NVM */
+ /* copy the MAC address */
if (e1000e_read_mac_addr(&adapter->hw))
e_err("NVM Read Error while reading MAC address\n");
@@ -5326,7 +5311,7 @@ static struct pci_error_handlers e1000_err_handler = {
.resume = e1000_io_resume,
};
-static struct pci_device_id e1000_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 94c59498cdb6..488bc13cc7e6 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1288,8 +1288,9 @@ set_multicast_list(struct net_device *dev)
short ioaddr = dev->base_addr;
unsigned short mode;
struct dev_mc_list *dmi=dev->mc_list;
+	int mc_count = netdev_mc_count(dev);
- if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63)
+ if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63)
{
eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
mode = inb(ioaddr + REG2);
@@ -1299,7 +1300,7 @@ set_multicast_list(struct net_device *dev)
eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
}
- else if (dev->mc_count==0 )
+ else if (mc_count == 0)
{
eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
mode = inb(ioaddr + REG2);
@@ -1329,9 +1330,9 @@ set_multicast_list(struct net_device *dev)
outw(MC_SETUP, ioaddr + IO_PORT);
outw(0, ioaddr + IO_PORT);
outw(0, ioaddr + IO_PORT);
- outw(6*(dev->mc_count + 1), ioaddr + IO_PORT);
+ outw(6 * (mc_count + 1), ioaddr + IO_PORT);
- for (i = 0; i < dev->mc_count; i++)
+ for (i = 0; i < mc_count; i++)
{
eaddrs=(unsigned short *)dmi->dmi_addr;
dmi=dmi->next;
@@ -1348,7 +1349,7 @@ set_multicast_list(struct net_device *dev)
outb(MC_SETUP, ioaddr);
/* Update the transmit queue */
- i = lp->tx_end + XMT_HEADER + 6*(dev->mc_count + 1);
+ i = lp->tx_end + XMT_HEADER + 6 * (mc_count + 1);
if (lp->tx_start != lp->tx_end)
{
@@ -1380,8 +1381,8 @@ set_multicast_list(struct net_device *dev)
break;
} else if ((i & 0x0f) == 0x03) { /* MC-Done */
printk(KERN_DEBUG "%s: set Rx mode to %d address%s.\n",
- dev->name, dev->mc_count,
- dev->mc_count > 1 ? "es":"");
+ dev->name, mc_count,
+ mc_count > 1 ? "es":"");
break;
}
}
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 6fbfc8eee632..d804ff18eda8 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -1578,7 +1578,7 @@ static void eexp_setup_filter(struct net_device *dev)
{
struct dev_mc_list *dmi;
unsigned short ioaddr = dev->base_addr;
- int count = dev->mc_count;
+ int count = netdev_mc_count(dev);
int i;
if (count > 8) {
printk(KERN_INFO "%s: too many multicast addresses (%d)\n",
@@ -1627,9 +1627,9 @@ eexp_set_multicast(struct net_device *dev)
}
if (!(dev->flags & IFF_PROMISC)) {
eexp_setup_filter(dev);
- if (lp->old_mc_count != dev->mc_count) {
+ if (lp->old_mc_count != netdev_mc_count(dev)) {
kick = 1;
- lp->old_mc_count = dev->mc_count;
+ lp->old_mc_count = netdev_mc_count(dev);
}
}
if (kick) {
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 7b62336e6736..99e4f8360d2f 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -1981,7 +1981,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
}
ehea_allmulti(dev, 0);
- if (dev->mc_count) {
+ if (!netdev_mc_empty(dev)) {
ret = ehea_drop_multicast_list(dev);
if (ret) {
/* Dropping the current multicast list failed.
@@ -1990,14 +1990,14 @@ static void ehea_set_multicast_list(struct net_device *dev)
ehea_allmulti(dev, 1);
}
- if (dev->mc_count > port->adapter->max_mc_mac) {
+ if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
ehea_info("Mcast registration limit reached (0x%llx). "
"Use ALLMULTI!",
port->adapter->max_mc_mac);
goto out;
}
- for (i = 0, k_mcl_entry = dev->mc_list; i < dev->mc_count; i++,
+ for (i = 0, k_mcl_entry = dev->mc_list; i < netdev_mc_count(dev); i++,
k_mcl_entry = k_mcl_entry->next)
ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 66813c91a720..3ee32e58c7ec 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -1413,7 +1413,7 @@ static void enc28j60_set_multicast_list(struct net_device *dev)
if (netif_msg_link(priv))
dev_info(&dev->dev, "promiscuous mode\n");
priv->rxfilter = RXFILTER_PROMISC;
- } else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count) {
+ } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
if (netif_msg_link(priv))
dev_info(&dev->dev, "%smulticast mode\n",
(dev->flags & IFF_ALLMULTI) ? "all-" : "");
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index e1c2076228ba..ee01f5a6d0d4 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -34,7 +34,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver"
-#define DRV_VERSION "1.1.0.100"
+#define DRV_VERSION "1.1.0.241a"
#define DRV_COPYRIGHT "Copyright 2008-2009 Cisco Systems, Inc"
#define PFX DRV_NAME ": "
@@ -89,9 +89,12 @@ struct enic {
spinlock_t devcmd_lock;
u8 mac_addr[ETH_ALEN];
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
+ unsigned int flags;
unsigned int mc_count;
int csum_rx_enabled;
u32 port_mtu;
+ u32 rx_coalesce_usecs;
+ u32 tx_coalesce_usecs;
/* work queue cache line section */
____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index f875751af15e..94749ebaaea8 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -51,7 +51,7 @@
#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
/* Supported devices */
-static struct pci_device_id enic_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
{ 0, } /* end of table */
};
@@ -261,6 +261,62 @@ static void enic_set_msglevel(struct net_device *netdev, u32 value)
enic->msg_enable = value;
}
+static int enic_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
+ ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
+
+ return 0;
+}
+
+static int enic_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct enic *enic = netdev_priv(netdev);
+ u32 tx_coalesce_usecs;
+ u32 rx_coalesce_usecs;
+
+ tx_coalesce_usecs = min_t(u32,
+ INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
+ ecmd->tx_coalesce_usecs);
+ rx_coalesce_usecs = min_t(u32,
+ INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
+ ecmd->rx_coalesce_usecs);
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ if (tx_coalesce_usecs != rx_coalesce_usecs)
+ return -EINVAL;
+
+ vnic_intr_coalescing_timer_set(&enic->intr[ENIC_INTX_WQ_RQ],
+ INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
+ break;
+ case VNIC_DEV_INTR_MODE_MSI:
+ if (tx_coalesce_usecs != rx_coalesce_usecs)
+ return -EINVAL;
+
+ vnic_intr_coalescing_timer_set(&enic->intr[0],
+ INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_WQ],
+ INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
+ vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_RQ],
+ INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs));
+ break;
+ default:
+ break;
+ }
+
+ enic->tx_coalesce_usecs = tx_coalesce_usecs;
+ enic->rx_coalesce_usecs = rx_coalesce_usecs;
+
+ return 0;
+}
+
static const struct ethtool_ops enic_ethtool_ops = {
.get_settings = enic_get_settings,
.get_drvinfo = enic_get_drvinfo,
@@ -278,6 +334,8 @@ static const struct ethtool_ops enic_ethtool_ops = {
.set_sg = ethtool_op_set_sg,
.get_tso = ethtool_op_get_tso,
.set_tso = enic_set_tso,
+ .get_coalesce = enic_get_coalesce,
+ .set_coalesce = enic_set_coalesce,
.get_flags = ethtool_op_get_flags,
.set_flags = ethtool_op_set_flags,
};
@@ -363,12 +421,12 @@ static void enic_mtu_check(struct enic *enic)
u32 mtu = vnic_dev_mtu(enic->vdev);
if (mtu && mtu != enic->port_mtu) {
+ enic->port_mtu = mtu;
if (mtu < enic->netdev->mtu)
printk(KERN_WARNING PFX
"%s: interface MTU (%d) set higher "
"than switch port MTU (%d)\n",
enic->netdev->name, enic->netdev->mtu, mtu);
- enic->port_mtu = mtu;
}
}
@@ -673,7 +731,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
- struct net_device *netdev)
+ struct net_device *netdev)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_wq *wq = &enic->wq[0];
@@ -769,10 +827,11 @@ static void enic_set_multicast_list(struct net_device *netdev)
int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
+ unsigned int mc_count = netdev_mc_count(netdev);
int allmulti = (netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS);
+ mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
+ unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
- unsigned int mc_count = netdev->mc_count;
unsigned int i, j;
if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
@@ -780,8 +839,11 @@ static void enic_set_multicast_list(struct net_device *netdev)
spin_lock(&enic->devcmd_lock);
- vnic_dev_packet_filter(enic->vdev, directed,
- multicast, broadcast, promisc, allmulti);
+ if (enic->flags != flags) {
+ enic->flags = flags;
+ vnic_dev_packet_filter(enic->vdev, directed,
+ multicast, broadcast, promisc, allmulti);
+ }
/* Is there an easier way? Trying to minimize to
* calls to add/del multicast addrs. We keep the
@@ -1084,34 +1146,6 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
return 0;
}
-static void enic_rq_drop_buf(struct vnic_rq *rq,
- struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
- int skipped, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
- struct sk_buff *skb = buf->os_buf;
-
- if (skipped)
- return;
-
- pci_unmap_single(enic->pdev, buf->dma_addr,
- buf->len, PCI_DMA_FROMDEVICE);
-
- dev_kfree_skb_any(skb);
-}
-
-static int enic_rq_service_drop(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
-
- vnic_rq_service(&enic->rq[q_number], cq_desc,
- completed_index, VNIC_RQ_RETURN_DESC,
- enic_rq_drop_buf, opaque);
-
- return 0;
-}
-
static int enic_poll(struct napi_struct *napi, int budget)
{
struct enic *enic = container_of(napi, struct enic, napi);
@@ -1119,6 +1153,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
unsigned int rq_work_to_do = budget;
unsigned int wq_work_to_do = -1; /* no limit */
unsigned int work_done, rq_work_done, wq_work_done;
+ int err;
/* Service RQ (first) and WQ
*/
@@ -1142,16 +1177,19 @@ static int enic_poll(struct napi_struct *napi, int budget)
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- if (rq_work_done > 0) {
+ err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
- /* Replenish RQ
- */
+ /* Buffer allocation failed. Stay in polling
+ * mode so we can try to fill the ring again.
+ */
- vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
+ if (err)
+ rq_work_done = rq_work_to_do;
- } else {
+ if (rq_work_done < rq_work_to_do) {
- /* If no work done, flush all LROs and exit polling
+ /* Some work done, but not enough to stay in polling,
+ * flush all LROs and exit polling
*/
if (netdev->features & NETIF_F_LRO)
@@ -1170,6 +1208,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
struct net_device *netdev = enic->netdev;
unsigned int work_to_do = budget;
unsigned int work_done;
+ int err;
/* Service RQ
*/
@@ -1177,25 +1216,30 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
work_to_do, enic_rq_service, NULL);
- if (work_done > 0) {
-
- /* Replenish RQ
- */
-
- vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
-
- /* Return intr event credits for this polling
- * cycle. An intr event is the completion of a
- * RQ packet.
- */
+ /* Return intr event credits for this polling
+ * cycle. An intr event is the completion of a
+ * RQ packet.
+ */
+ if (work_done > 0)
vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
work_done,
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- } else {
- /* If no work done, flush all LROs and exit polling
+ err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
+
+ /* Buffer allocation failed. Stay in polling mode
+ * so we can try to fill the ring again.
+ */
+
+ if (err)
+ work_done = work_to_do;
+
+ if (work_done < work_to_do) {
+
+ /* Some work done, but not enough to stay in polling,
+ * flush all LROs and exit polling
*/
if (netdev->features & NETIF_F_LRO)
@@ -1304,6 +1348,24 @@ static int enic_request_intr(struct enic *enic)
return err;
}
+static void enic_synchronize_irqs(struct enic *enic)
+{
+ unsigned int i;
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ case VNIC_DEV_INTR_MODE_MSI:
+ synchronize_irq(enic->pdev->irq);
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ for (i = 0; i < enic->intr_count; i++)
+ synchronize_irq(enic->msix_entry[i].vector);
+ break;
+ default:
+ break;
+ }
+}
+
static int enic_notify_set(struct enic *enic)
{
int err;
@@ -1360,11 +1422,13 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->rq_count; i++) {
- err = vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
- if (err) {
+ vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
+ /* Need at least one buffer on ring to get going */
+ if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
printk(KERN_ERR PFX
"%s: Unable to alloc receive buffers.\n",
netdev->name);
+ err = -ENOMEM;
goto err_out_notify_unset;
}
}
@@ -1409,16 +1473,19 @@ static int enic_stop(struct net_device *netdev)
unsigned int i;
int err;
+ for (i = 0; i < enic->intr_count; i++)
+ vnic_intr_mask(&enic->intr[i]);
+
+ enic_synchronize_irqs(enic);
+
del_timer_sync(&enic->notify_timer);
spin_lock(&enic->devcmd_lock);
vnic_dev_disable(enic->vdev);
spin_unlock(&enic->devcmd_lock);
napi_disable(&enic->napi);
- netif_stop_queue(netdev);
-
- for (i = 0; i < enic->intr_count; i++)
- vnic_intr_mask(&enic->intr[i]);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
for (i = 0; i < enic->wq_count; i++) {
err = vnic_wq_disable(&enic->wq[i]);
@@ -1436,11 +1503,6 @@ static int enic_stop(struct net_device *netdev)
spin_unlock(&enic->devcmd_lock);
enic_free_intr(enic);
- (void)vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
- -1, enic_rq_service_drop, NULL);
- (void)vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
- -1, enic_wq_service, NULL);
-
for (i = 0; i < enic->wq_count; i++)
vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
for (i = 0; i < enic->rq_count; i++)
@@ -1762,7 +1824,8 @@ int enic_dev_init(struct enic *enic)
err = enic_set_intr_mode(enic);
if (err) {
printk(KERN_ERR PFX
- "Failed to set intr mode, aborting.\n");
+ "Failed to set intr mode based on resource "
+ "counts and system capabilities, aborting.\n");
return err;
}
@@ -1986,6 +2049,9 @@ static int __devinit enic_probe(struct pci_dev *pdev,
goto err_out_dev_deinit;
}
+ enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
+ enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
+
netdev->netdev_ops = &enic_netdev_ops;
netdev->watchdog_timeo = 2 * HZ;
netdev->ethtool_ops = &enic_ethtool_ops;
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 32111144efc9..02839bf0fe8b 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -66,21 +66,21 @@ int enic_get_vnic_config(struct enic *enic)
GET_CONFIG(wq_desc_count);
GET_CONFIG(rq_desc_count);
GET_CONFIG(mtu);
- GET_CONFIG(intr_timer);
GET_CONFIG(intr_timer_type);
GET_CONFIG(intr_mode);
+ GET_CONFIG(intr_timer_usec);
c->wq_desc_count =
min_t(u32, ENIC_MAX_WQ_DESCS,
max_t(u32, ENIC_MIN_WQ_DESCS,
c->wq_desc_count));
- c->wq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */
+ c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
c->rq_desc_count =
min_t(u32, ENIC_MAX_RQ_DESCS,
max_t(u32, ENIC_MIN_RQ_DESCS,
c->rq_desc_count));
- c->rq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */
+ c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
if (c->mtu == 0)
c->mtu = 1500;
@@ -88,15 +88,17 @@ int enic_get_vnic_config(struct enic *enic)
max_t(u16, ENIC_MIN_MTU,
c->mtu));
- c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
+ c->intr_timer_usec = min_t(u32,
+ INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
+ c->intr_timer_usec);
printk(KERN_INFO PFX "vNIC MAC addr %pM wq/rq %d/%d\n",
enic->mac_addr, c->wq_desc_count, c->rq_desc_count);
printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d "
- "intr timer %d\n",
+ "intr timer %d usec\n",
c->mtu, ENIC_SETTING(enic, TXCSUM),
ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO),
- ENIC_SETTING(enic, LRO), c->intr_timer);
+ ENIC_SETTING(enic, LRO), c->intr_timer_usec);
return 0;
}
@@ -303,7 +305,7 @@ void enic_init_vnic_resources(struct enic *enic)
for (i = 0; i < enic->intr_count; i++) {
vnic_intr_init(&enic->intr[i],
- enic->config.intr_timer,
+ INTR_COALESCE_USEC_TO_HW(enic->config.intr_timer_usec),
enic->config.intr_timer_type,
mask_on_assertion);
}
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index 29a48e8b59d3..69b9b70c7da0 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -36,7 +36,6 @@ struct vnic_res {
};
#define VNIC_DEV_CAP_INIT 0x0001
-#define VNIC_DEV_CAP_PERBI 0x0002
struct vnic_dev {
void *priv;
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
index 6332ac9391b8..8eeb6758491b 100644
--- a/drivers/net/enic/vnic_enet.h
+++ b/drivers/net/enic/vnic_enet.h
@@ -20,6 +20,10 @@
#ifndef _VNIC_ENIC_H_
#define _VNIC_ENIC_H_
+/* Hardware intr coalesce timer is in units of 1.5us */
+#define INTR_COALESCE_USEC_TO_HW(usec) ((usec) * 2/3)
+#define INTR_COALESCE_HW_TO_USEC(usec) ((usec) * 3/2)
+
/* Device-specific region: enet configuration */
struct vnic_enet_config {
u32 flags;
@@ -30,6 +34,7 @@ struct vnic_enet_config {
u8 intr_timer_type;
u8 intr_mode;
char devname[16];
+ u32 intr_timer_usec;
};
#define VENETF_TSO 0x1 /* TSO enabled */
diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/enic/vnic_intr.c
index 1f8786d7195e..3934309a9498 100644
--- a/drivers/net/enic/vnic_intr.c
+++ b/drivers/net/enic/vnic_intr.c
@@ -50,12 +50,18 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
unsigned int coalescing_type, unsigned int mask_on_assertion)
{
- iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
+ vnic_intr_coalescing_timer_set(intr, coalescing_timer);
iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
iowrite32(0, &intr->ctrl->int_credits);
}
+void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
+ unsigned int coalescing_timer)
+{
+ iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
+}
+
void vnic_intr_clean(struct vnic_intr *intr)
{
iowrite32(0, &intr->ctrl->int_credits);
diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/enic/vnic_intr.h
index 9a53604edce6..2fe6c6339e3c 100644
--- a/drivers/net/enic/vnic_intr.h
+++ b/drivers/net/enic/vnic_intr.h
@@ -61,6 +61,7 @@ static inline void vnic_intr_unmask(struct vnic_intr *intr)
static inline void vnic_intr_mask(struct vnic_intr *intr)
{
iowrite32(1, &intr->ctrl->mask);
+ (void)ioread32(&intr->ctrl->mask);
}
static inline void vnic_intr_return_credits(struct vnic_intr *intr,
@@ -101,6 +102,8 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
unsigned int index);
void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
unsigned int coalescing_type, unsigned int mask_on_assertion);
+void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
+ unsigned int coalescing_timer);
void vnic_intr_clean(struct vnic_intr *intr);
#endif /* _VNIC_INTR_H_ */
diff --git a/drivers/net/enic/vnic_nic.h b/drivers/net/enic/vnic_nic.h
index eeaf329945d8..cf80ab46d582 100644
--- a/drivers/net/enic/vnic_nic.h
+++ b/drivers/net/enic/vnic_nic.h
@@ -41,12 +41,12 @@
#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
-#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 0)
-#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 1)
-#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 2)
-#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 3)
-#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 4)
-#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 5)
+#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2)
+#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4)
+#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6)
static inline void vnic_set_nic_cfg(u32 *nic_cfg,
u8 rss_default_cpu, u8 rss_hash_type,
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 41494f7b2ec8..31a3adb65566 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -167,7 +167,7 @@ static const struct epic_chip_info pci_id_tbl[] = {
};
-static struct pci_device_id epic_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = {
{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
@@ -1390,20 +1390,20 @@ static void set_rx_mode(struct net_device *dev)
outl(0x002C, ioaddr + RxCtrl);
/* Unconditionally log net taps. */
memset(mc_filter, 0xff, sizeof(mc_filter));
- } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
/* There is apparently a chip bug, so the multicast filter
is never enabled. */
/* Too many to filter perfectly -- accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
outl(0x000C, ioaddr + RxCtrl);
- } else if (dev->mc_count == 0) {
+ } else if (netdev_mc_empty(dev)) {
outl(0x0004, ioaddr + RxCtrl);
return;
} else { /* Never executed, for now. */
struct dev_mc_list *mclist;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
unsigned int bit_nr =
ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index 71bfeec33a0b..d3abeee3f110 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1359,7 +1359,7 @@ static void eth16i_multicast(struct net_device *dev)
{
int ioaddr = dev->base_addr;
- if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
{
outb(3, ioaddr + RECEIVE_MODE_REG);
} else {
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index bd1db92aec1b..f9d5ca078743 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -904,7 +904,7 @@ static int ethoc_probe(struct platform_device *pdev)
}
mmio = devm_request_mem_region(&pdev->dev, res->start,
- res->end - res->start + 1, res->name);
+ resource_size(res), res->name);
if (!mmio) {
dev_err(&pdev->dev, "cannot request I/O memory space\n");
ret = -ENXIO;
@@ -917,7 +917,7 @@ static int ethoc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
mem = devm_request_mem_region(&pdev->dev, res->start,
- res->end - res->start + 1, res->name);
+ resource_size(res), res->name);
if (!mem) {
dev_err(&pdev->dev, "cannot request memory space\n");
ret = -ENXIO;
@@ -945,7 +945,7 @@ static int ethoc_probe(struct platform_device *pdev)
priv->dma_alloc = 0;
priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
- mmio->end - mmio->start + 1);
+ resource_size(mmio));
if (!priv->iobase) {
dev_err(&pdev->dev, "cannot remap I/O memory space\n");
ret = -ENXIO;
@@ -954,7 +954,7 @@ static int ethoc_probe(struct platform_device *pdev)
if (netdev->mem_end) {
priv->membase = devm_ioremap_nocache(&pdev->dev,
- netdev->mem_start, mem->end - mem->start + 1);
+ netdev->mem_start, resource_size(mem));
if (!priv->membase) {
dev_err(&pdev->dev, "cannot remap memory space\n");
ret = -ENXIO;
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index dd4ba01fd92d..96817a872f47 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -1213,7 +1213,7 @@ static void SetMulticastFilter(struct net_device *dev)
}
/* Update table */
- for (i = 0; i < dev->mc_count; i++) { /* for each address in the list */
+ for (i = 0; i < netdev_mc_count(dev); i++) { /* for each address in the list */
addrs = dmi->dmi_addr;
dmi = dmi->next;
if ((*addrs & 0x01) == 1) { /* multicast address? */
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index dac4e595589e..f95b5ff0587d 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1786,7 +1786,7 @@ static void __set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
@@ -1796,7 +1796,7 @@ static void __set_rx_mode(struct net_device *dev)
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
unsigned int bit;
bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
@@ -1941,7 +1941,7 @@ static int netdev_close(struct net_device *dev)
return 0;
}
-static struct pci_device_id fealnx_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(fealnx_pci_tbl) = {
{0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
{0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 16a1d58419d9..d9d14c83f51c 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1128,6 +1128,26 @@ static phy_info_t phy_info_dp83848= {
},
};
+static phy_info_t phy_info_lan8700 = {
+ 0x0007C0C,
+ "LAN8700",
+ (const phy_cmd_t []) { /* config */
+ { mk_mii_read(MII_REG_CR), mii_parse_cr },
+ { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* startup */
+ { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+ { mk_mii_read(MII_REG_SR), mii_parse_sr },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* act_int */
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* shutdown */
+ { mk_mii_end, }
+ },
+};
/* ------------------------------------------------------------------------- */
static phy_info_t const * const phy_info[] = {
@@ -1137,6 +1157,7 @@ static phy_info_t const * const phy_info[] = {
&phy_info_am79c874,
&phy_info_ks8721bl,
&phy_info_dp83848,
+ &phy_info_lan8700,
NULL
};
@@ -1585,7 +1606,7 @@ static void set_multicast_list(struct net_device *dev)
dmi = dev->mc_list;
- for (j = 0; j < dev->mc_count; j++, dmi = dmi->next) {
+ for (j = 0; j < netdev_mc_count(dev); j++, dmi = dmi->next) {
/* Only support group multicast for now */
if (!(dmi->dmi_addr[0] & 1))
continue;
@@ -1658,6 +1679,7 @@ static int fec_enet_init(struct net_device *dev, int index)
{
struct fec_enet_private *fep = netdev_priv(dev);
struct bufdesc *cbd_base;
+ struct bufdesc *bdp;
int i;
/* Allocate memory for buffer descriptors. */
@@ -1710,6 +1732,34 @@ static int fec_enet_init(struct net_device *dev, int index)
/* Set MII speed to 2.5 MHz */
fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
/ 2500000) / 2) & 0x3F) << 1;
+
+ /* Initialize the receive buffer descriptors. */
+ bdp = fep->rx_bd_base;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+
+ /* Initialize the BD for every fragment in the page. */
+ bdp->cbd_sc = 0;
+ bdp++;
+ }
+
+ /* Set the last buffer to wrap */
+ bdp--;
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ /* ...and the same for transmit */
+ bdp = fep->tx_bd_base;
+ for (i = 0; i < TX_RING_SIZE; i++) {
+
+ /* Initialize the BD for every fragment in the page. */
+ bdp->cbd_sc = 0;
+ bdp->cbd_bufaddr = 0;
+ bdp++;
+ }
+
+ /* Set the last buffer to wrap */
+ bdp--;
+ bdp->cbd_sc |= BD_SC_WRAP;
+
fec_restart(dev, 0);
/* Queue up command to detect the PHY and initialize the
@@ -1730,7 +1780,6 @@ static void
fec_restart(struct net_device *dev, int duplex)
{
struct fec_enet_private *fep = netdev_priv(dev);
- struct bufdesc *bdp;
int i;
/* Whack a reset. We should wait for this. */
@@ -1768,33 +1817,6 @@ fec_restart(struct net_device *dev, int duplex)
}
}
- /* Initialize the receive buffer descriptors. */
- bdp = fep->rx_bd_base;
- for (i = 0; i < RX_RING_SIZE; i++) {
-
- /* Initialize the BD for every fragment in the page. */
- bdp->cbd_sc = BD_ENET_RX_EMPTY;
- bdp++;
- }
-
- /* Set the last buffer to wrap */
- bdp--;
- bdp->cbd_sc |= BD_SC_WRAP;
-
- /* ...and the same for transmit */
- bdp = fep->tx_bd_base;
- for (i = 0; i < TX_RING_SIZE; i++) {
-
- /* Initialize the BD for every fragment in the page. */
- bdp->cbd_sc = 0;
- bdp->cbd_bufaddr = 0;
- bdp++;
- }
-
- /* Set the last buffer to wrap */
- bdp--;
- bdp->cbd_sc |= BD_SC_WRAP;
-
/* Enable MII mode */
if (duplex) {
/* MII enable / FD enable */
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 848e8407ea8f..10903b75802f 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -581,7 +581,7 @@ static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
u32 gaddr2 = 0x00000000;
dmi = dev->mc_list;
- for (i=0; i<dev->mc_count; i++) {
+ for (i=0; i<netdev_mc_count(dev); i++) {
crc = ether_crc_le(6, dmi->dmi_addr) >> 26;
if (crc >= 32)
gaddr1 |= 1 << (crc-32);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 3c340489804a..3eb713b014f9 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -6198,7 +6198,7 @@ static void nv_shutdown(struct pci_dev *pdev)
#define nv_resume NULL
#endif /* CONFIG_PM */
-static struct pci_device_id pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
{ /* nForce Ethernet Controller */
PCI_DEVICE(0x10DE, 0x01C3),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 22e5a847a588..482f27d5f7d4 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -218,7 +218,7 @@ static void set_multicast_finish(struct net_device *dev)
/* if all multi or too many multicasts; just enable all */
if ((dev->flags & IFF_ALLMULTI) != 0 ||
- dev->mc_count > FCC_MAX_MULTICAST_ADDRS) {
+ netdev_mc_count(dev) > FCC_MAX_MULTICAST_ADDRS) {
W32(ep, fen_gaddrh, 0xffffffff);
W32(ep, fen_gaddrl, 0xffffffff);
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index ca7bcb8ab3a1..ddf13ef8ac87 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -220,7 +220,7 @@ static void set_multicast_finish(struct net_device *dev)
/* if all multi or too many multicasts; just enable all */
if ((dev->flags & IFF_ALLMULTI) != 0 ||
- dev->mc_count > FEC_MAX_MULTICAST_ADDRS) {
+ netdev_mc_count(dev) > FEC_MAX_MULTICAST_ADDRS) {
fep->fec.hthi = 0xffffffffU;
fep->fec.htlo = 0xffffffffU;
}
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index 008cdd9cc536..141dbc91e5e7 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -213,7 +213,7 @@ static void set_multicast_finish(struct net_device *dev)
/* if all multi or too many multicasts; just enable all */
if ((dev->flags & IFF_ALLMULTI) != 0 ||
- dev->mc_count > SCC_MAX_MULTICAST_ADDRS) {
+ netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) {
W16(ep, sen_gaddr1, 0xffff);
W16(ep, sen_gaddr2, 0xffff);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 8bd3c9f17532..c9be090485dd 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -2863,7 +2863,7 @@ static void gfar_set_multi(struct net_device *dev)
em_num = 0;
}
- if (dev->mc_count == 0)
+ if (netdev_mc_empty(dev))
return;
/* Parse the list, and set the appropriate bits */
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index ea85075a89a2..c70b147b4feb 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1854,13 +1854,13 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
writew(0x000F, ioaddr + AddrMode);
- } else if ((dev->mc_count > 63) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > 63) || (dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
writew(0x000B, ioaddr + AddrMode);
- } else if (dev->mc_count > 0) { /* Must use the CAM filter. */
+ } else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */
struct dev_mc_list *mclist;
int i;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
writel(*(u32*)(mclist->dmi_addr), ioaddr + 0x100 + i*8);
writel(0x20000 | (*(u16*)&mclist->dmi_addr[4]),
@@ -1990,7 +1990,7 @@ static void __devexit hamachi_remove_one (struct pci_dev *pdev)
}
}
-static struct pci_device_id hamachi_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(hamachi_pci_tbl) = {
{ 0x1318, 0x0911, PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }
};
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 90f890e7c5e1..debac1bc6799 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -210,7 +210,7 @@ MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
#endif
#ifdef CONFIG_PCI
-static struct pci_device_id hp100_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(hp100_pci_tbl) = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2970A, PCI_ANY_ID, PCI_ANY_ID,},
@@ -2090,7 +2090,7 @@ static void hp100_set_multicast_list(struct net_device *dev)
lp->mac2_mode = HP100_MAC2MODE6; /* promiscuous mode = get all good */
lp->mac1_mode = HP100_MAC1MODE6; /* packets on the net */
memset(&lp->hash_bytes, 0xff, 8);
- } else if (dev->mc_count || (dev->flags & IFF_ALLMULTI)) {
+ } else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) {
lp->mac2_mode = HP100_MAC2MODE5; /* multicast mode = get packets for */
lp->mac1_mode = HP100_MAC1MODE5; /* me, broadcasts and all multicasts */
#ifdef HP100_MULTICAST_FILTER /* doesn't work!!! */
@@ -2104,9 +2104,10 @@ static void hp100_set_multicast_list(struct net_device *dev)
memset(&lp->hash_bytes, 0x00, 8);
#ifdef HP100_DEBUG
- printk("hp100: %s: computing hash filter - mc_count = %i\n", dev->name, dev->mc_count);
+ printk("hp100: %s: computing hash filter - mc_count = %i\n",
+ dev->name, netdev_mc_count(dev));
#endif
- for (i = 0, dmi = dev->mc_list; i < dev->mc_count; i++, dmi = dmi->next) {
+ for (i = 0, dmi = dev->mc_list; i < netdev_mc_count(dev); i++, dmi = dmi->next) {
addrs = dmi->dmi_addr;
if ((*addrs & 0x01) == 0x01) { /* multicast address? */
#ifdef HP100_DEBUG
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index fb5e019169ee..b75d27e82a3d 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -391,7 +391,7 @@ static void emac_hash_mc(struct emac_instance *dev)
struct dev_mc_list *dmi;
int i;
- DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
+ DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
memset(gaht_temp, 0, sizeof (gaht_temp));
@@ -425,9 +425,9 @@ static inline u32 emac_iff2rmr(struct net_device *ndev)
if (ndev->flags & IFF_PROMISC)
r |= EMAC_RMR_PME;
else if (ndev->flags & IFF_ALLMULTI ||
- (ndev->mc_count > EMAC_XAHT_SLOTS(dev)))
+ (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
r |= EMAC_RMR_PMME;
- else if (ndev->mc_count > 0)
+ else if (!netdev_mc_empty(ndev))
r |= EMAC_RMR_MAE;
return r;
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index a86693906ac8..41b9c0efcbdd 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1062,7 +1062,8 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
struct ibmveth_adapter *adapter = netdev_priv(netdev);
unsigned long lpar_rc;
- if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
+ if ((netdev->flags & IFF_PROMISC) ||
+ (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastEnableRecv |
IbmVethMcastDisableFiltering,
@@ -1083,7 +1084,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
}
/* add the addresses to the filter table */
- for(i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) {
+ for(i = 0; i < netdev_mc_count(netdev); ++i, mclist = mclist->next) {
// add the multicast address to the filter table
unsigned long mcast_addr = 0;
memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index d51c9927c819..bb53083ec61f 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -219,6 +219,9 @@ struct e1000_adv_tx_context_desc {
#define E1000_VLVF_LVLAN 0x00100000
#define E1000_VLVF_VLANID_ENABLE 0x80000000
+#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
#define E1000_IOVCTL 0x05BBC
#define E1000_IOVCTL_REUSE_VFQ 0x00000001
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index dd4e6ffd29f5..abb7333a1fbf 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -310,6 +310,7 @@
#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
* Filter - RW */
+#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
#define rd32(reg) (readl(hw->hw_addr + reg))
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index b1c1eb88893f..83ea11701f45 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -75,11 +75,14 @@ struct vf_data_storage {
u16 vlans_enabled;
u32 flags;
unsigned long last_nack;
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
};
#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
+#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */
/* RX descriptor control thresholds.
* PTHRESH - MAC will consider prefetch if it has fewer than this number of
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index c881347cb26d..4fe7b0ba6310 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -60,7 +60,7 @@ static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
};
-static struct pci_device_id igb_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
@@ -133,6 +133,12 @@ static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
+static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
+static int igb_ndo_set_vf_vlan(struct net_device *netdev,
+ int vf, u16 vlan, u8 qos);
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
+ struct ifla_vf_info *ivi);
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
@@ -1352,6 +1358,10 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_vlan_rx_register = igb_vlan_rx_register,
.ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
+ .ndo_set_vf_mac = igb_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
+ .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
+ .ndo_get_vf_config = igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = igb_netpoll,
#endif
@@ -2479,7 +2489,8 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
wr32(E1000_RLPML, max_frame_size);
}
-static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
+static inline void igb_set_vmolr(struct igb_adapter *adapter,
+ int vfn, bool aupe)
{
struct e1000_hw *hw = &adapter->hw;
u32 vmolr;
@@ -2492,8 +2503,11 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
return;
vmolr = rd32(E1000_VMOLR(vfn));
- vmolr |= E1000_VMOLR_AUPE | /* Accept untagged packets */
- E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+ vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+ if (aupe)
+ vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
+ else
+ vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
/* clear all bits that might not be set */
vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
@@ -2564,7 +2578,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
wr32(E1000_SRRCTL(reg_idx), srrctl);
/* set filtering for VMDQ pools */
- igb_set_vmolr(adapter, reg_idx & 0x7);
+ igb_set_vmolr(adapter, reg_idx & 0x7, true);
/* enable receive descriptor fetching */
rxdctl = rd32(E1000_RXDCTL(reg_idx));
@@ -2848,14 +2862,14 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
u32 vmolr = 0;
int i;
- if (!netdev->mc_count) {
+ if (netdev_mc_empty(netdev)) {
/* nothing to program, so clear mc list */
igb_update_mc_addr_list(hw, NULL, 0);
igb_restore_vf_multicasts(adapter);
return 0;
}
- mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
+ mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
if (!mta_list)
return -ENOMEM;
@@ -2865,7 +2879,7 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
/* The shared function expects a packed array of only addresses. */
mc_ptr = netdev->mc_list;
- for (i = 0; i < netdev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(netdev); i++) {
if (!mc_ptr)
break;
memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
@@ -2874,7 +2888,7 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
igb_update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
- return netdev->mc_count;
+ return netdev_mc_count(netdev);
}
/**
@@ -2895,12 +2909,13 @@ static int igb_write_uc_addr_list(struct net_device *netdev)
int count = 0;
/* return ENOMEM indicating insufficient memory for addresses */
- if (netdev->uc.count > rar_entries)
+ if (netdev_uc_count(netdev) > rar_entries)
return -ENOMEM;
- if (netdev->uc.count && rar_entries) {
+ if (!netdev_uc_empty(netdev) && rar_entries) {
struct netdev_hw_addr *ha;
- list_for_each_entry(ha, &netdev->uc.list, list) {
+
+ netdev_for_each_uc_addr(ha, netdev) {
if (!rar_entries)
break;
igb_rar_set_qsel(adapter, ha->addr,
@@ -4095,6 +4110,9 @@ static irqreturn_t igb_msix_other(int irq, void *data)
u32 icr = rd32(E1000_ICR);
/* reading ICR causes bit 31 of EICR to be cleared */
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
if (icr & E1000_ICR_DOUTSYNC) {
/* HW is reporting DMA is out of sync */
adapter->stats.doosync++;
@@ -4486,10 +4504,57 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
reg |= size;
wr32(E1000_VMOLR(vf), reg);
}
- return 0;
}
}
- return -1;
+ return 0;
+}
+
+static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (vid)
+ wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
+ else
+ wr32(E1000_VMVIR(vf), 0);
+}
+
+static int igb_ndo_set_vf_vlan(struct net_device *netdev,
+ int vf, u16 vlan, u8 qos)
+{
+ int err = 0;
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
+ return -EINVAL;
+ if (vlan || qos) {
+ err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
+ if (err)
+ goto out;
+ igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+ igb_set_vmolr(adapter, vf, !vlan);
+ adapter->vf_data[vf].pf_vlan = vlan;
+ adapter->vf_data[vf].pf_qos = qos;
+ dev_info(&adapter->pdev->dev,
+ "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF VLAN has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ } else {
+ igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
+ false, vf);
+ igb_set_vmvir(adapter, vlan, vf);
+ igb_set_vmolr(adapter, vf, true);
+ adapter->vf_data[vf].pf_vlan = 0;
+ adapter->vf_data[vf].pf_qos = 0;
+ }
+out:
+ return err;
}
static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
@@ -4502,15 +4567,21 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
- /* clear all flags */
- adapter->vf_data[vf].flags = 0;
+ /* clear flags */
+ adapter->vf_data[vf].flags &= ~(IGB_VF_FLAG_PF_SET_MAC);
adapter->vf_data[vf].last_nack = jiffies;
/* reset offloads to defaults */
- igb_set_vmolr(adapter, vf);
+ igb_set_vmolr(adapter, vf, true);
/* reset vlans for device */
igb_clear_vf_vfta(adapter, vf);
+ if (adapter->vf_data[vf].pf_vlan)
+ igb_ndo_set_vf_vlan(adapter->netdev, vf,
+ adapter->vf_data[vf].pf_vlan,
+ adapter->vf_data[vf].pf_qos);
+ else
+ igb_clear_vf_vfta(adapter, vf);
/* reset multicast table array for vf */
adapter->vf_data[vf].num_vf_mc_hashes = 0;
@@ -4524,7 +4595,8 @@ static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
/* generate a new mac address as we were hotplug removed/added */
- random_ether_addr(vf_mac);
+ if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
+ random_ether_addr(vf_mac);
/* process remaining reset events */
igb_vf_reset(adapter, vf);
@@ -4637,7 +4709,10 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
break;
case E1000_VF_SET_VLAN:
- retval = igb_set_vf_vlan(adapter, msgbuf, vf);
+ if (adapter->vf_data[vf].pf_vlan)
+ retval = -1;
+ else
+ retval = igb_set_vf_vlan(adapter, msgbuf, vf);
break;
default:
dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
@@ -4718,6 +4793,9 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
igb_write_itr(q_vector);
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
if (icr & E1000_ICR_DOUTSYNC) {
/* HW is reporting DMA is out of sync */
adapter->stats.doosync++;
@@ -4757,6 +4835,9 @@ static irqreturn_t igb_intr(int irq, void *data)
if (!(icr & E1000_ICR_INT_ASSERTED))
return IRQ_NONE;
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
if (icr & E1000_ICR_DOUTSYNC) {
/* HW is reporting DMA is out of sync */
adapter->stats.doosync++;
@@ -5993,6 +6074,43 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
return 0;
}
+static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
+ return -EINVAL;
+ adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
+ dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
+ dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
+ " change effective.");
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ return igb_set_vf_mac(adapter, vf, mac);
+}
+
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+
+static int igb_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ if (vf >= adapter->vfs_allocated_count)
+ return -EINVAL;
+ ivi->vf = vf;
+ memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
+ ivi->tx_rate = 0;
+ ivi->vlan = adapter->vf_data[vf].pf_vlan;
+ ivi->qos = adapter->vf_data[vf].pf_qos;
+ return 0;
+}
+
static void igb_vmm_control(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
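For the new ndo_set_vf_vlan path above, the PF writes a per-VF VMVIR value that packs the VLAN ID, the 802.1p priority shifted by VLAN_PRIO_SHIFT, and the "always use default VLAN" flag. A minimal sketch of how that value is composed; the helper name is illustrative, while the constants are the ones added in this patch.

#include <linux/if_vlan.h>		/* VLAN_PRIO_SHIFT */

static u32 example_vmvir_value(u16 vlan, u8 qos)
{
	/* vlan == 0 clears PF-forced tagging; mirrors igb_set_vmvir() above */
	if (!vlan)
		return 0;

	return vlan | (qos << VLAN_PRIO_SHIFT) | E1000_VMVIR_VLANA_DEFAULT;
}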
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 2aa71a766c35..6029c400f2be 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1403,8 +1403,8 @@ static void igbvf_set_multi(struct net_device *netdev)
u8 *mta_list = NULL;
int i;
- if (netdev->mc_count) {
- mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC);
+ if (!netdev_mc_empty(netdev)) {
+ mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
if (!mta_list) {
dev_err(&adapter->pdev->dev,
"failed to allocate multicast filter list\n");
@@ -1415,7 +1415,7 @@ static void igbvf_set_multi(struct net_device *netdev)
/* prepare a packed array of only addresses. */
mc_ptr = netdev->mc_list;
- for (i = 0; i < netdev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(netdev); i++) {
if (!mc_ptr)
break;
memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
@@ -2609,11 +2609,7 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
- dev_info(&pdev->dev, "Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
- /* MAC address */
- netdev->dev_addr[0], netdev->dev_addr[1],
- netdev->dev_addr[2], netdev->dev_addr[3],
- netdev->dev_addr[4], netdev->dev_addr[5]);
+ dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
}
@@ -2779,11 +2775,8 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr)) {
- dev_err(&pdev->dev, "Invalid MAC Address: "
- "%02x:%02x:%02x:%02x:%02x:%02x\n",
- netdev->dev_addr[0], netdev->dev_addr[1],
- netdev->dev_addr[2], netdev->dev_addr[3],
- netdev->dev_addr[4], netdev->dev_addr[5]);
+ dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
+ netdev->dev_addr);
err = -EIO;
goto err_hw_init;
}
@@ -2885,7 +2878,7 @@ static struct pci_error_handlers igbvf_err_handler = {
.resume = igbvf_io_resume,
};
-static struct pci_device_id igbvf_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
{ } /* terminate list */
};
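The hand-rolled "%02x:%02x:..." MAC formatting removed above is replaced by the %pM printk extension, which prints a 6-byte buffer as a colon-separated address. A minimal, illustrative use:

#include <linux/pci.h>
#include <linux/if_ether.h>

static void example_print_mac(struct pci_dev *pdev)
{
	static const u8 mac[ETH_ALEN] = { 0x00, 0x1b, 0x21, 0x3a, 0x4c, 0x5d };

	/* prints "Address: 00:1b:21:3a:4c:5d" */
	dev_info(&pdev->dev, "Address: %pM\n", mac);
}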
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 8ec15ab8c8c2..0bd5fef22d49 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -1383,7 +1383,7 @@ static void __devexit ioc3_remove_one (struct pci_dev *pdev)
*/
}
-static struct pci_device_id ioc3_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ioc3_pci_tbl) = {
{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
{ 0 }
};
@@ -1681,14 +1681,15 @@ static void ioc3_set_multicast_list(struct net_device *dev)
ioc3_w_emcr(ip->emcr); /* Clear promiscuous. */
(void) ioc3_r_emcr();
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > 64)) {
/* Too many for hashing to make sense or we want all
multicast packets anyway, so skip computing all the
hashes and just accept all packets. */
ip->ehar_h = 0xffffffff;
ip->ehar_l = 0xffffffff;
} else {
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
char *addr = dmi->dmi_addr;
dmi = dmi->next;
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index ba8d246d05a0..dbdebd5efe86 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -92,7 +92,7 @@ static const char *ipg_brand_name[] = {
"D-Link NIC IP1000A"
};
-static struct pci_device_id ipg_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
{ PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
{ PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
{ PCI_VDEVICE(SUNDANCE, 0x1021), 2 },
@@ -585,11 +585,11 @@ static void ipg_nic_set_multicast_list(struct net_device *dev)
receivemode = IPG_RM_RECEIVEALLFRAMES;
} else if ((dev->flags & IFF_ALLMULTI) ||
((dev->flags & IFF_MULTICAST) &&
- (dev->mc_count > IPG_MULTICAST_HASHTABLE_SIZE))) {
+ (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) {
/* NIC to be configured to receive all multicast
* frames. */
receivemode |= IPG_RM_RECEIVEMULTICAST;
- } else if ((dev->flags & IFF_MULTICAST) && (dev->mc_count > 0)) {
+ } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
/* NIC to be configured to receive selected
* multicast addresses. */
receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index f76384221422..af10e97345ce 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -64,6 +64,16 @@ endchoice
comment "Dongle support"
+config SH_SIR
+ tristate "SuperH SIR on UART"
+ depends on IRDA && SUPERH && \
+ (CPU_SUBTYPE_SH7722 || CPU_SUBTYPE_SH7723 || \
+ CPU_SUBTYPE_SH7724)
+ default n
+ help
+ Say Y here if you want to enable the SIR function on SuperH UART
+ devices.
+
config DONGLE
bool "Serial dongle support"
depends on IRTTY_SIR
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index d82e1e3bd8c8..e030d47e2793 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
# SIR drivers
obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
obj-$(CONFIG_BFIN_SIR) += bfin_sir.o
+obj-$(CONFIG_SH_SIR) += sh_sir.o
# dongle drivers for SIR drivers
obj-$(CONFIG_ESI_DONGLE) += esi-sir.o
obj-$(CONFIG_TEKRAM_DONGLE) += tekram-sir.o
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 2d7b5c1d5572..b7e6625ca75e 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -184,7 +184,7 @@
#define CONFIG0H_DMA_ON_NORX CONFIG0H_DMA_OFF| OBOE_CONFIG0H_ENDMAC
#define CONFIG0H_DMA_ON CONFIG0H_DMA_ON_NORX | OBOE_CONFIG0H_ENRX
-static struct pci_device_id toshoboe_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(toshoboe_pci_tbl) = {
{ PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIR701, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIRD01, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* Terminating entry */
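The PCI ID tables in this patch are converted from open-coded struct pci_device_id arrays to DEFINE_PCI_DEVICE_TABLE(). In kernels of this vintage the macro is roughly the definition sketched below (an approximation, not the authoritative header text): it makes the table const and places it in the __devinitconst section so it can be discarded with other init data.

/* approximate definition, as found in <linux/pci.h> of this era */
#define DEFINE_PCI_DEVICE_TABLE(_table) \
	const struct pci_device_id _table[] __devinitconst

/* usage equivalent to the converted declarations above (device ID is illustrative) */
static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, 0x10c9), 0 },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);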
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
new file mode 100644
index 000000000000..d7c983dc91ad
--- /dev/null
+++ b/drivers/net/irda/sh_sir.c
@@ -0,0 +1,823 @@
+/*
+ * SuperH IrDA Driver
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on bfin_sir.c
+ * Copyright 2006-2009 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+#include <asm/clock.h>
+
+#define DRIVER_NAME "sh_sir"
+
+#define RX_PHASE (1 << 0)
+#define TX_PHASE (1 << 1)
+#define TX_COMP_PHASE (1 << 2) /* tx complete */
+#define NONE_PHASE (1 << 31)
+
+#define IRIF_RINTCLR 0x0016 /* DMA rx interrupt source clear */
+#define IRIF_TINTCLR 0x0018 /* DMA tx interrupt source clear */
+#define IRIF_SIR0 0x0020 /* IrDA-SIR10 control */
+#define IRIF_SIR1 0x0022 /* IrDA-SIR10 baudrate error correction */
+#define IRIF_SIR2 0x0024 /* IrDA-SIR10 baudrate count */
+#define IRIF_SIR3 0x0026 /* IrDA-SIR10 status */
+#define IRIF_SIR_FRM 0x0028 /* Hardware frame processing set */
+#define IRIF_SIR_EOF 0x002A /* EOF value */
+#define IRIF_SIR_FLG 0x002C /* Flag clear */
+#define IRIF_UART_STS2 0x002E /* UART status 2 */
+#define IRIF_UART0 0x0030 /* UART control */
+#define IRIF_UART1 0x0032 /* UART status */
+#define IRIF_UART2 0x0034 /* UART mode */
+#define IRIF_UART3 0x0036 /* UART transmit data */
+#define IRIF_UART4 0x0038 /* UART receive data */
+#define IRIF_UART5 0x003A /* UART interrupt mask */
+#define IRIF_UART6 0x003C /* UART baud rate error correction */
+#define IRIF_UART7 0x003E /* UART baud rate count set */
+#define IRIF_CRC0 0x0040 /* CRC engine control */
+#define IRIF_CRC1 0x0042 /* CRC engine input data */
+#define IRIF_CRC2 0x0044 /* CRC engine calculation */
+#define IRIF_CRC3 0x0046 /* CRC engine output data 1 */
+#define IRIF_CRC4 0x0048 /* CRC engine output data 2 */
+
+/* IRIF_SIR0 */
+#define IRTPW (1 << 1) /* transmit pulse width select */
+#define IRERRC (1 << 0) /* Clear receive pulse width error */
+
+/* IRIF_SIR3 */
+#define IRERR (1 << 0) /* received pulse width Error */
+
+/* IRIF_SIR_FRM */
+#define EOFD (1 << 9) /* EOF detection flag */
+#define FRER (1 << 8) /* Frame Error bit */
+#define FRP (1 << 0) /* Frame processing set */
+
+/* IRIF_UART_STS2 */
+#define IRSME (1 << 6) /* Receive Sum Error flag */
+#define IROVE (1 << 5) /* Receive Overrun Error flag */
+#define IRFRE (1 << 4) /* Receive Framing Error flag */
+#define IRPRE (1 << 3) /* Receive Parity Error flag */
+
+/* IRIF_UART0_*/
+#define TBEC (1 << 2) /* Transmit Data Clear */
+#define RIE (1 << 1) /* Receive Enable */
+#define TIE (1 << 0) /* Transmit Enable */
+
+/* IRIF_UART1 */
+#define URSME (1 << 6) /* Receive Sum Error Flag */
+#define UROVE (1 << 5) /* Receive Overrun Error Flag */
+#define URFRE (1 << 4) /* Receive Framing Error Flag */
+#define URPRE (1 << 3) /* Receive Parity Error Flag */
+#define RBF (1 << 2) /* Receive Buffer Full Flag */
+#define TSBE (1 << 1) /* Transmit Shift Buffer Empty Flag */
+#define TBE (1 << 0) /* Transmit Buffer Empty flag */
+#define TBCOMP (TSBE | TBE)
+
+/* IRIF_UART5 */
+#define RSEIM (1 << 6) /* Receive Sum Error Flag IRQ Mask */
+#define RBFIM (1 << 2) /* Receive Buffer Full Flag IRQ Mask */
+#define TSBEIM (1 << 1) /* Transmit Shift Buffer Empty Flag IRQ Mask */
+#define TBEIM (1 << 0) /* Transmit Buffer Empty Flag IRQ Mask */
+#define RX_MASK (RSEIM | RBFIM)
+
+/* IRIF_CRC0 */
+#define CRC_RST (1 << 15) /* CRC Engine Reset */
+#define CRC_CT_MASK 0x0FFF
+
+/************************************************************************
+
+
+ structure
+
+
+************************************************************************/
+struct sh_sir_self {
+ void __iomem *membase;
+ unsigned int irq;
+ struct clk *clk;
+
+ struct net_device *ndev;
+
+ struct irlap_cb *irlap;
+ struct qos_info qos;
+
+ iobuff_t tx_buff;
+ iobuff_t rx_buff;
+};
+
+/************************************************************************
+
+
+ common function
+
+
+************************************************************************/
+static void sh_sir_write(struct sh_sir_self *self, u32 offset, u16 data)
+{
+ iowrite16(data, self->membase + offset);
+}
+
+static u16 sh_sir_read(struct sh_sir_self *self, u32 offset)
+{
+ return ioread16(self->membase + offset);
+}
+
+static void sh_sir_update_bits(struct sh_sir_self *self, u32 offset,
+ u16 mask, u16 data)
+{
+ u16 old, new;
+
+ old = sh_sir_read(self, offset);
+ new = (old & ~mask) | data;
+ if (old != new)
+ sh_sir_write(self, offset, new);
+}
+
+/************************************************************************
+
+
+ CRC function
+
+
+************************************************************************/
+static void sh_sir_crc_reset(struct sh_sir_self *self)
+{
+ sh_sir_write(self, IRIF_CRC0, CRC_RST);
+}
+
+static void sh_sir_crc_add(struct sh_sir_self *self, u8 data)
+{
+ sh_sir_write(self, IRIF_CRC1, (u16)data);
+}
+
+static u16 sh_sir_crc_cnt(struct sh_sir_self *self)
+{
+ return CRC_CT_MASK & sh_sir_read(self, IRIF_CRC0);
+}
+
+static u16 sh_sir_crc_out(struct sh_sir_self *self)
+{
+ return sh_sir_read(self, IRIF_CRC4);
+}
+
+static int sh_sir_crc_init(struct sh_sir_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ int ret = -EIO;
+ u16 val;
+
+ sh_sir_crc_reset(self);
+
+ sh_sir_crc_add(self, 0xCC);
+ sh_sir_crc_add(self, 0xF5);
+ sh_sir_crc_add(self, 0xF1);
+ sh_sir_crc_add(self, 0xA7);
+
+ val = sh_sir_crc_cnt(self);
+ if (4 != val) {
+ dev_err(dev, "CRC count error %x\n", val);
+ goto crc_init_out;
+ }
+
+ val = sh_sir_crc_out(self);
+ if (0x51DF != val) {
+ dev_err(dev, "CRC result error%x\n", val);
+ goto crc_init_out;
+ }
+
+ ret = 0;
+
+crc_init_out:
+
+ sh_sir_crc_reset(self);
+ return ret;
+}
+
+/************************************************************************
+
+
+ baud rate functions
+
+
+************************************************************************/
+#define SCLK_BASE 1843200 /* 1.8432MHz */
+
+static u32 sh_sir_find_sclk(struct clk *irda_clk)
+{
+ struct cpufreq_frequency_table *freq_table = irda_clk->freq_table;
+ struct clk *pclk = clk_get(NULL, "peripheral_clk");
+ u32 limit, min = 0xffffffff, tmp;
+ int i, index = 0;
+
+ limit = clk_get_rate(pclk);
+ clk_put(pclk);
+
+ /* the IrDA clock must not exceed peripheral_clk */
+ for (i = 0;
+ freq_table[i].frequency != CPUFREQ_TABLE_END;
+ i++) {
+ u32 freq = freq_table[i].frequency;
+
+ if (freq == CPUFREQ_ENTRY_INVALID)
+ continue;
+
+ /* skip frequencies above peripheral_clk */
+ if (freq > limit)
+ continue;
+
+ tmp = freq % SCLK_BASE;
+ if (tmp < min) {
+ min = tmp;
+ index = i;
+ }
+ }
+
+ return freq_table[index].frequency;
+}
+
+#define ERR_ROUNDING(a) ((a + 5000) / 10000)
+static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate)
+{
+ struct clk *clk;
+ struct device *dev = &self->ndev->dev;
+ u32 rate;
+ u16 uabca, uabc;
+ u16 irbca, irbc;
+ u32 min, rerr, tmp;
+ int i;
+
+ /* Baud Rate Error Correction x 10000 */
+ u32 rate_err_array[] = {
+ 0000, 0625, 1250, 1875,
+ 2500, 3125, 3750, 4375,
+ 5000, 5625, 6250, 6875,
+ 7500, 8125, 8750, 9375,
+ };
+
+ /*
+ * FIXME
+ *
+ * only 9600 baud is supported for now
+ */
+ switch (baudrate) {
+ case 9600:
+ break;
+ default:
+ dev_err(dev, "un-supported baudrate %d\n", baudrate);
+ return -EIO;
+ }
+
+ clk = clk_get(NULL, "irda_clk");
+ if (!clk) {
+ dev_err(dev, "can not get irda_clk\n");
+ return -EIO;
+ }
+
+ clk_set_rate(clk, sh_sir_find_sclk(clk));
+ rate = clk_get_rate(clk);
+ clk_put(clk);
+
+ dev_dbg(dev, "selected sclk = %d\n", rate);
+
+ /*
+ * CALCULATION
+ *
+ * 1843200 = system rate / (irbca + (irbc + 1))
+ */
+
+ irbc = rate / SCLK_BASE;
+
+ tmp = rate - (SCLK_BASE * irbc);
+ tmp *= 10000;
+
+ rerr = tmp / SCLK_BASE;
+
+ min = 0xffffffff;
+ irbca = 0;
+ for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
+ tmp = abs(rate_err_array[i] - rerr);
+ if (min > tmp) {
+ min = tmp;
+ irbca = i;
+ }
+ }
+
+ tmp = rate / (irbc + ERR_ROUNDING(rate_err_array[irbca]));
+ if ((SCLK_BASE / 100) < abs(tmp - SCLK_BASE))
+ dev_warn(dev, "IrDA freq error margin over %d\n", tmp);
+
+ dev_dbg(dev, "target = %d, result = %d, infrared = %d.%d\n",
+ SCLK_BASE, tmp, irbc, rate_err_array[irbca]);
+
+ irbca = (irbca & 0xF) << 4;
+ irbc = (irbc - 1) & 0xF;
+
+ if (!irbc) {
+ dev_err(dev, "sh_sir can not set 0 in IRIF_SIR2\n");
+ return -EIO;
+ }
+
+ sh_sir_write(self, IRIF_SIR0, IRTPW | IRERRC);
+ sh_sir_write(self, IRIF_SIR1, irbca);
+ sh_sir_write(self, IRIF_SIR2, irbc);
+
+ /*
+ * CALCULATION
+ *
+ * BaudRate[bps] = system rate / (uabca + (uabc + 1) x 16)
+ */
+
+ uabc = rate / baudrate;
+ uabc = (uabc / 16) - 1;
+ uabc = (uabc + 1) * 16;
+
+ tmp = rate - (uabc * baudrate);
+ tmp *= 10000;
+
+ rerr = tmp / baudrate;
+
+ min = 0xffffffff;
+ uabca = 0;
+ for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
+ tmp = abs(rate_err_array[i] - rerr);
+ if (min > tmp) {
+ min = tmp;
+ uabca = i;
+ }
+ }
+
+ tmp = rate / (uabc + ERR_ROUNDING(rate_err_array[uabca]));
+ if ((baudrate / 100) < abs(tmp - baudrate))
+ dev_warn(dev, "UART freq error margin over %d\n", tmp);
+
+ dev_dbg(dev, "target = %d, result = %d, uart = %d.%d\n",
+ baudrate, tmp,
+ uabc, rate_err_array[uabca]);
+
+ uabca = (uabca & 0xF) << 4;
+ uabc = (uabc / 16) - 1;
+
+ sh_sir_write(self, IRIF_UART6, uabca);
+ sh_sir_write(self, IRIF_UART7, uabc);
+
+ return 0;
+}
+
+/************************************************************************
+
+
+ iobuf function
+
+
+************************************************************************/
+static int __sh_sir_init_iobuf(iobuff_t *io, int size)
+{
+ io->head = kmalloc(size, GFP_KERNEL);
+ if (!io->head)
+ return -ENOMEM;
+
+ io->truesize = size;
+ io->in_frame = FALSE;
+ io->state = OUTSIDE_FRAME;
+ io->data = io->head;
+
+ return 0;
+}
+
+static void sh_sir_remove_iobuf(struct sh_sir_self *self)
+{
+ kfree(self->rx_buff.head);
+ kfree(self->tx_buff.head);
+
+ self->rx_buff.head = NULL;
+ self->tx_buff.head = NULL;
+}
+
+static int sh_sir_init_iobuf(struct sh_sir_self *self, int rxsize, int txsize)
+{
+ int err = -ENOMEM;
+
+ if (self->rx_buff.head ||
+ self->tx_buff.head) {
+ dev_err(&self->ndev->dev, "iobuff has already existed.");
+ return err;
+ }
+
+ err = __sh_sir_init_iobuf(&self->rx_buff, rxsize);
+ if (err)
+ goto iobuf_err;
+
+ err = __sh_sir_init_iobuf(&self->tx_buff, txsize);
+
+iobuf_err:
+ if (err)
+ sh_sir_remove_iobuf(self);
+
+ return err;
+}
+
+/************************************************************************
+
+
+ status function
+
+
+************************************************************************/
+static void sh_sir_clear_all_err(struct sh_sir_self *self)
+{
+ /* Clear error flag for receive pulse width */
+ sh_sir_update_bits(self, IRIF_SIR0, IRERRC, IRERRC);
+
+ /* Clear frame / EOF error flag */
+ sh_sir_write(self, IRIF_SIR_FLG, 0xffff);
+
+ /* Clear all status error */
+ sh_sir_write(self, IRIF_UART_STS2, 0);
+}
+
+static void sh_sir_set_phase(struct sh_sir_self *self, int phase)
+{
+ u16 uart5 = 0;
+ u16 uart0 = 0;
+
+ switch (phase) {
+ case TX_PHASE:
+ uart5 = TBEIM;
+ uart0 = TBEC | TIE;
+ break;
+ case TX_COMP_PHASE:
+ uart5 = TSBEIM;
+ uart0 = TIE;
+ break;
+ case RX_PHASE:
+ uart5 = RX_MASK;
+ uart0 = RIE;
+ break;
+ default:
+ break;
+ }
+
+ sh_sir_write(self, IRIF_UART5, uart5);
+ sh_sir_write(self, IRIF_UART0, uart0);
+}
+
+static int sh_sir_is_which_phase(struct sh_sir_self *self)
+{
+ u16 val = sh_sir_read(self, IRIF_UART5);
+
+ if (val & TBEIM)
+ return TX_PHASE;
+
+ if (val & TSBEIM)
+ return TX_COMP_PHASE;
+
+ if (val & RX_MASK)
+ return RX_PHASE;
+
+ return NONE_PHASE;
+}
+
+static void sh_sir_tx(struct sh_sir_self *self, int phase)
+{
+ switch (phase) {
+ case TX_PHASE:
+ if (0 >= self->tx_buff.len) {
+ sh_sir_set_phase(self, TX_COMP_PHASE);
+ } else {
+ sh_sir_write(self, IRIF_UART3, self->tx_buff.data[0]);
+ self->tx_buff.len--;
+ self->tx_buff.data++;
+ }
+ break;
+ case TX_COMP_PHASE:
+ sh_sir_set_phase(self, RX_PHASE);
+ netif_wake_queue(self->ndev);
+ break;
+ default:
+ dev_err(&self->ndev->dev, "should not happen\n");
+ break;
+ }
+}
+
+static int sh_sir_read_data(struct sh_sir_self *self)
+{
+ u16 val;
+ int timeout = 1024;
+
+ while (timeout--) {
+ val = sh_sir_read(self, IRIF_UART1);
+
+ /* data get */
+ if (val & RBF) {
+ if (val & (URSME | UROVE | URFRE | URPRE))
+ break;
+
+ return (int)sh_sir_read(self, IRIF_UART4);
+ }
+
+ udelay(1);
+ }
+
+ dev_err(&self->ndev->dev, "UART1 %04x : STATUS %04x\n",
+ val, sh_sir_read(self, IRIF_UART_STS2));
+
+ /* read data register for clear error */
+ sh_sir_read(self, IRIF_UART4);
+
+ return -1;
+}
+
+static void sh_sir_rx(struct sh_sir_self *self)
+{
+ int timeout = 1024;
+ int data;
+
+ while (timeout--) {
+ data = sh_sir_read_data(self);
+ if (data < 0)
+ break;
+
+ async_unwrap_char(self->ndev, &self->ndev->stats,
+ &self->rx_buff, (u8)data);
+ self->ndev->last_rx = jiffies;
+
+ if (EOFD & sh_sir_read(self, IRIF_SIR_FRM))
+ continue;
+
+ break;
+ }
+}
+
+static irqreturn_t sh_sir_irq(int irq, void *dev_id)
+{
+ struct sh_sir_self *self = dev_id;
+ struct device *dev = &self->ndev->dev;
+ int phase = sh_sir_is_which_phase(self);
+
+ switch (phase) {
+ case TX_COMP_PHASE:
+ case TX_PHASE:
+ sh_sir_tx(self, phase);
+ break;
+ case RX_PHASE:
+ if (sh_sir_read(self, IRIF_SIR3))
+ dev_err(dev, "rcv pulse width error occurred\n");
+
+ sh_sir_rx(self);
+ sh_sir_clear_all_err(self);
+ break;
+ default:
+ dev_err(dev, "unknown interrupt\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+/************************************************************************
+
+
+ net_device_ops function
+
+
+************************************************************************/
+static int sh_sir_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+ int speed = irda_get_next_speed(skb);
+
+ if ((0 < speed) &&
+ (9600 != speed)) {
+ dev_err(&ndev->dev, "support 9600 only (%d)\n", speed);
+ return -EIO;
+ }
+
+ netif_stop_queue(ndev);
+
+ self->tx_buff.data = self->tx_buff.head;
+ self->tx_buff.len = 0;
+ if (skb->len)
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ sh_sir_set_phase(self, TX_PHASE);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int sh_sir_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
+{
+ /*
+ * FIXME
+ *
+ * This function is required by the IrDA framework,
+ * but there is nothing to do here yet.
+ */
+ return 0;
+}
+
+static struct net_device_stats *sh_sir_stats(struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+
+ return &self->ndev->stats;
+}
+
+static int sh_sir_open(struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+ int err;
+
+ clk_enable(self->clk);
+ err = sh_sir_crc_init(self);
+ if (err)
+ goto open_err;
+
+ sh_sir_set_baudrate(self, 9600);
+
+ self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
+ if (!self->irlap)
+ goto open_err;
+
+ /*
+ * Now enable the interrupt then start the queue
+ */
+ sh_sir_update_bits(self, IRIF_SIR_FRM, FRP, FRP);
+ sh_sir_read(self, IRIF_UART1); /* flag clear */
+ sh_sir_read(self, IRIF_UART4); /* flag clear */
+ sh_sir_set_phase(self, RX_PHASE);
+
+ netif_start_queue(ndev);
+
+ dev_info(&self->ndev->dev, "opened\n");
+
+ return 0;
+
+open_err:
+ clk_disable(self->clk);
+
+ return err;
+}
+
+static int sh_sir_stop(struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+
+ /* Stop IrLAP */
+ if (self->irlap) {
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+ }
+
+ netif_stop_queue(ndev);
+
+ dev_info(&ndev->dev, "stoped\n");
+
+ return 0;
+}
+
+static const struct net_device_ops sh_sir_ndo = {
+ .ndo_open = sh_sir_open,
+ .ndo_stop = sh_sir_stop,
+ .ndo_start_xmit = sh_sir_hard_xmit,
+ .ndo_do_ioctl = sh_sir_ioctl,
+ .ndo_get_stats = sh_sir_stats,
+};
+
+/************************************************************************
+
+
+ platform_driver function
+
+
+************************************************************************/
+static int __devinit sh_sir_probe(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+ struct sh_sir_self *self;
+ struct resource *res;
+ char clk_name[8];
+ void __iomem *base;
+ unsigned int irq;
+ int err = -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || irq < 0) {
+ dev_err(&pdev->dev, "Not enough platform resources.\n");
+ goto exit;
+ }
+
+ ndev = alloc_irdadev(sizeof(*self));
+ if (!ndev)
+ goto exit;
+
+ base = ioremap_nocache(res->start, resource_size(res));
+ if (!base) {
+ err = -ENXIO;
+ dev_err(&pdev->dev, "Unable to ioremap.\n");
+ goto err_mem_1;
+ }
+
+ self = netdev_priv(ndev);
+ err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
+ if (err)
+ goto err_mem_2;
+
+ snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id);
+ self->clk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(self->clk)) {
+ dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ goto err_mem_3;
+ }
+
+ irda_init_max_qos_capabilies(&self->qos);
+
+ ndev->netdev_ops = &sh_sir_ndo;
+ ndev->irq = irq;
+
+ self->membase = base;
+ self->ndev = ndev;
+ self->qos.baud_rate.bits &= IR_9600; /* FIXME */
+ self->qos.min_turn_time.bits = 1; /* 10 ms or more */
+
+ irda_qos_bits_to_value(&self->qos);
+
+ err = register_netdev(ndev);
+ if (err)
+ goto err_mem_4;
+
+ platform_set_drvdata(pdev, ndev);
+
+ if (request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self)) {
+ dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
+ goto err_mem_4;
+ }
+
+ dev_info(&pdev->dev, "SuperH IrDA probed\n");
+
+ goto exit;
+
+err_mem_4:
+ clk_put(self->clk);
+err_mem_3:
+ sh_sir_remove_iobuf(self);
+err_mem_2:
+ iounmap(self->membase);
+err_mem_1:
+ free_netdev(ndev);
+exit:
+ return err;
+}
+
+static int __devexit sh_sir_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct sh_sir_self *self = netdev_priv(ndev);
+
+ if (!self)
+ return 0;
+
+ unregister_netdev(ndev);
+ clk_put(self->clk);
+ sh_sir_remove_iobuf(self);
+ iounmap(self->membase);
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sh_sir_driver = {
+ .probe = sh_sir_probe,
+ .remove = __devexit_p(sh_sir_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init sh_sir_init(void)
+{
+ return platform_driver_register(&sh_sir_driver);
+}
+
+static void __exit sh_sir_exit(void)
+{
+ platform_driver_unregister(&sh_sir_driver);
+}
+
+module_init(sh_sir_init);
+module_exit(sh_sir_exit);
+
+MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
+MODULE_DESCRIPTION("SuperH IrDA driver");
+MODULE_LICENSE("GPL");
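The CALCULATION comments in sh_sir_set_baudrate() describe the same scheme twice: pick an integer divisor toward a target frequency (1.8432 MHz for the SIR block, the requested baud rate for the UART), then absorb the remaining fractional error with a 1/16-step correction table. A condensed sketch of the error-index selection, standalone and with an illustrative function name:

#include <linux/kernel.h>	/* ARRAY_SIZE, abs */

/* target = rate / (divisor + correction), correction chosen in 1/16 steps */
static int example_pick_err_index(u32 rerr /* fractional error x 10000 */)
{
	/* same table as rate_err_array[] above */
	static const u32 err_table[] = {
		0, 625, 1250, 1875, 2500, 3125, 3750, 4375,
		5000, 5625, 6250, 6875, 7500, 8125, 8750, 9375,
	};
	u32 best = 0xffffffff, diff;
	int i, idx = 0;

	for (i = 0; i < ARRAY_SIZE(err_table); i++) {
		diff = abs((int)(err_table[i] - rerr));
		if (diff < best) {
			best = diff;
			idx = i;
		}
	}

	return idx;	/* err_table[idx] / 10000 is the chosen correction */
}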
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index fddb4efd5453..6533c010cf5c 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -121,7 +121,7 @@ static void iodelay(int udelay)
}
}
-static struct pci_device_id via_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(via_pci_tbl) = {
{ PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
{ PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
{ PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index bd3c6b5ee76a..209d4bcfaced 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -59,7 +59,7 @@ MODULE_LICENSE("GPL");
static /* const */ char drivername[] = DRIVER_NAME;
-static struct pci_device_id vlsi_irda_table [] = {
+static DEFINE_PCI_DEVICE_TABLE(vlsi_irda_table) = {
{
.class = PCI_CLASS_WIRELESS_IRDA << 8,
.class_mask = PCI_CLASS_SUBCLASS_MASK << 8,
diff --git a/drivers/net/isa-skeleton.c b/drivers/net/isa-skeleton.c
index 04d0502726c0..bb4a3cda6e4b 100644
--- a/drivers/net/isa-skeleton.c
+++ b/drivers/net/isa-skeleton.c
@@ -655,14 +655,15 @@ set_multicast_list(struct net_device *dev)
/* Enable promiscuous mode */
outw(MULTICAST|PROMISC, ioaddr);
}
- else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS)
+ else if ((dev->flags&IFF_ALLMULTI) ||
+ netdev_mc_count(dev) > HW_MAX_ADDRS)
{
/* Disable promiscuous mode, use normal mode. */
hardware_set_filter(NULL);
outw(MULTICAST, ioaddr);
}
- else if(dev->mc_count)
+ else if (!netdev_mc_empty(dev))
{
/* Walk the address list, and load the filter */
hardware_set_filter(dev->mc_list);
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 16c91910d6c1..ff015e15f5d1 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -958,7 +958,7 @@ static void veth_set_multicast_list(struct net_device *dev)
write_lock_irqsave(&port->mcast_gate, flags);
if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > VETH_MAX_MCAST)) {
+ (netdev_mc_count(dev) > VETH_MAX_MCAST)) {
port->promiscuous = 1;
} else {
struct dev_mc_list *dmi = dev->mc_list;
@@ -969,7 +969,7 @@ static void veth_set_multicast_list(struct net_device *dev)
/* Update table */
port->num_mcast = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
u8 *addr = dmi->dmi_addr;
u64 xaddr = 0;
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 5257ae08b9f9..92d2e71d0c8b 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -75,19 +75,14 @@ struct ixgb_adapter;
#include "ixgb_ee.h"
#include "ixgb_ids.h"
+#define PFX "ixgb: "
+
#ifdef _DEBUG_DRIVER_
-#define IXGB_DBG(args...) printk(KERN_DEBUG "ixgb: " args)
+#define IXGB_DBG(args...) printk(KERN_DEBUG PFX args)
#else
#define IXGB_DBG(args...)
#endif
-#define PFX "ixgb: "
-#define DPRINTK(nlevel, klevel, fmt, args...) \
- (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
- printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
- __func__ , ## args))
-
-
/* TX/RX descriptor defines */
#define DEFAULT_TXD 256
#define MAX_TXD 4096
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 593d1a4f217c..93d018505ebb 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -50,7 +50,7 @@ MODULE_PARM_DESC(copybreak,
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id ixgb_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = {
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
@@ -238,8 +238,8 @@ ixgb_up(struct ixgb_adapter *adapter)
if (err) {
if (adapter->have_msi)
pci_disable_msi(adapter->pdev);
- DPRINTK(PROBE, ERR,
- "Unable to allocate interrupt Error: %d\n", err);
+ netif_err(adapter, probe, adapter->netdev,
+ "Unable to allocate interrupt Error: %d\n", err);
return err;
}
@@ -310,7 +310,7 @@ ixgb_reset(struct ixgb_adapter *adapter)
ixgb_adapter_stop(hw);
if (!ixgb_init_hw(hw))
- DPRINTK(PROBE, ERR, "ixgb_init_hw failed.\n");
+ netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");
/* restore frame size information */
IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
@@ -447,7 +447,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* make sure the EEPROM is good */
if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
- DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "The EEPROM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
@@ -456,7 +457,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr)) {
- DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
+ netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
err = -EIO;
goto err_eeprom;
}
@@ -477,7 +478,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n");
+ netif_info(adapter, probe, adapter->netdev,
+ "Intel(R) PRO/10GbE Network Connection\n");
ixgb_check_options(adapter);
/* reset the hardware with the new settings */
@@ -552,14 +554,14 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */
- if ((hw->device_id == IXGB_DEVICE_ID_82597EX)
- || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4)
- || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
- || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
+ if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
+ (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
+ (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
+ (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
hw->mac_type = ixgb_82597;
else {
/* should never have loaded on this device */
- DPRINTK(PROBE, ERR, "unsupported device id\n");
+ netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
}
/* enable flow control to be programmed */
@@ -661,8 +663,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
size = sizeof(struct ixgb_buffer) * txdr->count;
txdr->buffer_info = vmalloc(size);
if (!txdr->buffer_info) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate transmit descriptor ring memory\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "Unable to allocate transmit descriptor ring memory\n");
return -ENOMEM;
}
memset(txdr->buffer_info, 0, size);
@@ -675,8 +677,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
if (!txdr->desc) {
vfree(txdr->buffer_info);
- DPRINTK(PROBE, ERR,
- "Unable to allocate transmit descriptor memory\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "Unable to allocate transmit descriptor memory\n");
return -ENOMEM;
}
memset(txdr->desc, 0, txdr->size);
@@ -750,8 +752,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
size = sizeof(struct ixgb_buffer) * rxdr->count;
rxdr->buffer_info = vmalloc(size);
if (!rxdr->buffer_info) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate receive descriptor ring\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "Unable to allocate receive descriptor ring\n");
return -ENOMEM;
}
memset(rxdr->buffer_info, 0, size);
@@ -765,8 +767,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
if (!rxdr->desc) {
vfree(rxdr->buffer_info);
- DPRINTK(PROBE, ERR,
- "Unable to allocate receive descriptors\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "Unable to allocate receive descriptors\n");
return -ENOMEM;
}
memset(rxdr->desc, 0, rxdr->size);
@@ -1077,7 +1079,7 @@ ixgb_set_multi(struct net_device *netdev)
rctl |= IXGB_RCTL_VFE;
}
- if (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
+ if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
rctl |= IXGB_RCTL_MPE;
IXGB_WRITE_REG(hw, RCTL, rctl);
} else {
@@ -1092,7 +1094,7 @@ ixgb_set_multi(struct net_device *netdev)
memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
- ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
+ ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
}
}
@@ -1580,7 +1582,8 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
/* MTU < 68 is an error for IPv4 traffic, just don't allow it */
if ((new_mtu < 68) ||
(max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
- DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu);
+ netif_err(adapter, probe, adapter->netdev,
+ "Invalid MTU setting %d\n", new_mtu);
return -EINVAL;
}
@@ -1616,7 +1619,7 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
return;
if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
+ (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
@@ -1854,24 +1857,25 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
&& !(IXGB_READ_REG(&adapter->hw, STATUS) &
IXGB_STATUS_TXOFF)) {
/* detected Tx unit hang */
- DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
- " TDH <%x>\n"
- " TDT <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "buffer_info[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " next_to_watch <%x>\n"
- " jiffies <%lx>\n"
- " next_to_watch.status <%x>\n",
- IXGB_READ_REG(&adapter->hw, TDH),
- IXGB_READ_REG(&adapter->hw, TDT),
- tx_ring->next_to_use,
- tx_ring->next_to_clean,
- tx_ring->buffer_info[eop].time_stamp,
- eop,
- jiffies,
- eop_desc->status);
+ netif_err(adapter, drv, adapter->netdev,
+ "Detected Tx Unit Hang\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%x>\n"
+ " jiffies <%lx>\n"
+ " next_to_watch.status <%x>\n",
+ IXGB_READ_REG(&adapter->hw, TDH),
+ IXGB_READ_REG(&adapter->hw, TDT),
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_ring->buffer_info[eop].time_stamp,
+ eop,
+ jiffies,
+ eop_desc->status);
netif_stop_queue(netdev);
}
}
@@ -2269,7 +2273,8 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
struct ixgb_adapter *adapter = netdev_priv(netdev);
if (pci_enable_device(pdev)) {
- DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "Cannot re-enable PCI device after reset\n");
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -2285,14 +2290,16 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
/* Make sure the EEPROM is good */
if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
- DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "After reset, the EEPROM checksum is not valid\n");
return PCI_ERS_RESULT_DISCONNECT;
}
ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr)) {
- DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "After reset, invalid MAC address\n");
return PCI_ERS_RESULT_DISCONNECT;
}
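The DPRINTK() macro removed from ixgb.h is replaced throughout by the generic netif_err()/netif_info() helpers, which gate the message on a NETIF_MSG_* class in the private msg_enable mask and prefix it with the interface name. A minimal sketch of the calling convention; the priv structure here is illustrative, the helpers only require a msg_enable field.

#include <linux/netdevice.h>

struct example_priv {
	struct net_device *netdev;
	u32 msg_enable;			/* NETIF_MSG_* bits */
};

static void example_report(struct example_priv *priv, int err)
{
	/* emitted only when NETIF_MSG_PROBE is set in priv->msg_enable */
	netif_err(priv, probe, priv->netdev,
		  "Unable to allocate interrupt Error: %d\n", err);
}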
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index bfef0ebcba9a..8f81efb49169 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -33,7 +33,8 @@
obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
- ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o
+ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
+ ixgbe_mbx.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 303e7bd39b67..19e94ee155a2 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -98,6 +98,22 @@
#define IXGBE_MAX_RSC_INT_RATE 162760
+#define IXGBE_MAX_VF_MC_ENTRIES 30
+#define IXGBE_MAX_VF_FUNCTIONS 64
+#define IXGBE_MAX_VFTA_ENTRIES 128
+#define MAX_EMULATION_MAC_ADDRS 16
+#define VMDQ_P(p) ((p) + adapter->num_vfs)
+
+struct vf_data_storage {
+ unsigned char vf_mac_addresses[ETH_ALEN];
+ u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
+ u16 num_vf_mc_hashes;
+ u16 default_vf_vlan_id;
+ u16 vlans_enabled;
+ bool clear_to_send;
+ int rar;
+};
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
@@ -159,6 +175,7 @@ struct ixgbe_ring {
struct ixgbe_queue_stats stats;
unsigned long reinit_state;
+ int numa_node;
u64 rsc_count; /* stat for coalesced packets */
u64 rsc_flush; /* stats for flushed packets */
u32 restart_queue; /* track tx queue restarts */
@@ -171,7 +188,7 @@ struct ixgbe_ring {
enum ixgbe_ring_f_enum {
RING_F_NONE = 0,
RING_F_DCB,
- RING_F_VMDQ,
+ RING_F_VMDQ, /* SR-IOV uses the same ring feature */
RING_F_RSS,
RING_F_FDIR,
#ifdef IXGBE_FCOE
@@ -183,7 +200,7 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_DCB_INDICES 8
#define IXGBE_MAX_RSS_INDICES 16
-#define IXGBE_MAX_VMDQ_INDICES 16
+#define IXGBE_MAX_VMDQ_INDICES 64
#define IXGBE_MAX_FDIR_INDICES 64
#ifdef IXGBE_FCOE
#define IXGBE_MAX_FCOE_INDICES 8
@@ -277,7 +294,7 @@ struct ixgbe_adapter {
u16 eitr_high;
/* TX */
- struct ixgbe_ring *tx_ring ____cacheline_aligned_in_smp; /* One per active queue */
+ struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
int num_tx_queues;
u32 tx_timeout_count;
bool detect_tx_hung;
@@ -286,8 +303,10 @@ struct ixgbe_adapter {
u64 lsc_int;
/* RX */
- struct ixgbe_ring *rx_ring ____cacheline_aligned_in_smp; /* One per active queue */
+ struct ixgbe_ring *rx_ring[MAX_RX_QUEUES] ____cacheline_aligned_in_smp;
int num_rx_queues;
+ int num_rx_pools; /* == num_rx_queues in 82598 */
+ int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
u64 non_eop_descs;
@@ -323,13 +342,14 @@ struct ixgbe_adapter {
#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20)
#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
-#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23)
-#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 24)
-#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 26)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27)
-#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 28)
-#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29)
+#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 23)
+#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 24)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 25)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 26)
+#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 27)
+#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 28)
+#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 29)
+#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 30)
u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
@@ -379,6 +399,13 @@ struct ixgbe_adapter {
u64 rsc_total_flush;
u32 wol;
u16 eeprom_version;
+
+ int node;
+
+ /* SR-IOV */
+ DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
+ unsigned int num_vfs;
+ struct vf_data_storage *vfinfo;
};
enum ixbge_state_t {
@@ -426,6 +453,10 @@ extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
struct ixgbe_atr_input *input,
u8 queue);
+extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+ struct ixgbe_atr_input *input,
+ struct ixgbe_atr_input_masks *input_masks,
+ u16 soft_id, u8 queue);
extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
u16 vlan_id);
extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
@@ -440,6 +471,7 @@ extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
u16 flex_byte);
extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
u8 l4type);
+extern void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fso(struct ixgbe_adapter *adapter,
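With SR-IOV enabled the VF pools occupy the low pool indices, so the VMDQ_P() macro added above shifts every PF pool reference up by num_vfs. A one-line sketch of what the macro amounts to, with the adapter layout as declared in this header:

/* VMDQ_P(p) == p + adapter->num_vfs: PF pool 0 becomes pool 'num_vfs' */
static inline u32 example_pf_pool(const struct ixgbe_adapter *adapter, u32 p)
{
	return p + adapter->num_vfs;
}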
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index b49bd6b9feb7..1f30e163bd9c 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -31,6 +31,7 @@
#include "ixgbe.h"
#include "ixgbe_phy.h"
+#include "ixgbe_mbx.h"
#define IXGBE_82599_MAX_TX_QUEUES 128
#define IXGBE_82599_MAX_RX_QUEUES 128
@@ -889,7 +890,7 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
s32 status = 0;
- u32 ctrl, ctrl_ext;
+ u32 ctrl;
u32 i;
u32 autoc;
u32 autoc2;
@@ -944,15 +945,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
status = IXGBE_ERR_RESET_FAILED;
hw_dbg(hw, "Reset polling failed to complete.\n");
}
- /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
- ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
- ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
- IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
msleep(50);
-
-
/*
* Store the original AUTOC/AUTOC2 values if they have not been
* stored off yet. Otherwise restore the stored original
@@ -1095,9 +1090,11 @@ static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on)
{
u32 regindex;
+ u32 vlvf_index;
u32 bitindex;
u32 bits;
u32 first_empty_slot;
+ u32 vt_ctl;
if (vlan > 4095)
return IXGBE_ERR_PARAM;
@@ -1124,76 +1121,84 @@ static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
/* Part 2
- * If the vind is set
+ * If VT mode is set
* Either vlan_on
* make sure the vlan is in VLVF
* set the vind bit in the matching VLVFB
* Or !vlan_on
* clear the pool bit and possibly the vind
*/
- if (vind) {
- /* find the vlanid or the first empty slot */
- first_empty_slot = 0;
-
- for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
- bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
- if (!bits && !first_empty_slot)
- first_empty_slot = regindex;
- else if ((bits & 0x0FFF) == vlan)
- break;
- }
+ vt_ctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ if (!(vt_ctl & IXGBE_VT_CTL_VT_ENABLE))
+ goto out;
- if (regindex >= IXGBE_VLVF_ENTRIES) {
- if (first_empty_slot)
- regindex = first_empty_slot;
- else {
- hw_dbg(hw, "No space in VLVF.\n");
- goto out;
- }
+ /* find the vlanid or the first empty slot */
+ first_empty_slot = 0;
+
+ for (vlvf_index = 1; vlvf_index < IXGBE_VLVF_ENTRIES; vlvf_index++) {
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVF(vlvf_index));
+ if (!bits && !first_empty_slot)
+ first_empty_slot = vlvf_index;
+ else if ((bits & 0x0FFF) == vlan)
+ break;
+ }
+
+ if (vlvf_index >= IXGBE_VLVF_ENTRIES) {
+ if (first_empty_slot)
+ vlvf_index = first_empty_slot;
+ else {
+ hw_dbg(hw, "No space in VLVF.\n");
+ goto out;
}
+ }
- if (vlan_on) {
- /* set the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(regindex * 2));
- bits |= (1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(regindex * 2), bits);
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1));
- bits |= (1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1), bits);
- }
+ if (vlan_on) {
+ /* set the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
+ bits |= (1 << vind);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2), bits);
} else {
- /* clear the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(regindex * 2));
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ bits |= (1 << (vind - 32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
+ }
+ } else {
+ /* clear the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
bits &= ~(1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(regindex * 2), bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1));
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1));
- bits &= ~(1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1), bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB(regindex * 2));
- }
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2), bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ } else {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ bits &= ~(1 << (vind - 32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
}
+ }
- if (bits)
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex),
- (IXGBE_VLVF_VIEN | vlan));
- else
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 0);
+ if (bits) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
+ (IXGBE_VLVF_VIEN | vlan));
+ /* if bits is non-zero then some pools/VFs are still
+ * using this VLAN ID. Force the VFTA entry to on */
+ bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+ bits |= (1 << bitindex);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
}
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
out:
return 0;
@@ -1434,6 +1439,9 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
/* Send interrupt when 64 filters are left */
fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
+ /* Initialize the drop queue to Rx queue 127 */
+ fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
+
switch (pballoc) {
case IXGBE_FDIR_PBALLOC_64K:
/* 2k - 1 perfect filters */
@@ -1675,8 +1683,8 @@ s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
* @src_addr_4: the fourth 4 bytes of the IP address to load
**/
s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
- u32 src_addr_1, u32 src_addr_2,
- u32 src_addr_3, u32 src_addr_4)
+ u32 src_addr_1, u32 src_addr_2,
+ u32 src_addr_3, u32 src_addr_4)
{
input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
@@ -1718,8 +1726,8 @@ s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
* @dst_addr_4: the fourth 4 bytes of the IP address to load
**/
s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
- u32 dst_addr_1, u32 dst_addr_2,
- u32 dst_addr_3, u32 dst_addr_4)
+ u32 dst_addr_1, u32 dst_addr_2,
+ u32 dst_addr_3, u32 dst_addr_4)
{
input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
@@ -1797,7 +1805,7 @@ s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
* @vm_pool: the Virtual Machine pool to load
**/
s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input,
- u8 vm_pool)
+ u8 vm_pool)
{
input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
@@ -1821,8 +1829,7 @@ s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
* @input: input stream to search
* @vlan: the VLAN id to load
**/
-static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input,
- u16 *vlan)
+static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
{
*vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
*vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
@@ -2078,23 +2085,26 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
* ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
* @hw: pointer to hardware structure
* @input: input bitstream
+ * @input_masks: bitwise masks for relevant fields
+ * @soft_id: software index into the silicon hash tables for filter storage
* @queue: queue index to direct traffic to
*
* Note that the caller to this function must lock before calling, since the
* hardware writes must be protected from one another.
**/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
- u16 soft_id,
- u8 queue)
+ struct ixgbe_atr_input *input,
+ struct ixgbe_atr_input_masks *input_masks,
+ u16 soft_id, u8 queue)
{
u32 fdircmd = 0;
u32 fdirhash;
- u32 src_ipv4, dst_ipv4;
+ u32 src_ipv4 = 0, dst_ipv4 = 0;
u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
u16 src_port, dst_port, vlan_id, flex_bytes;
u16 bucket_hash;
u8 l4type;
+ u8 fdirm = 0;
/* Get our input values */
ixgbe_atr_get_l4type_82599(input, &l4type);
@@ -2149,7 +2159,6 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
/* IPv4 */
ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
-
}
ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
@@ -2158,7 +2167,78 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
(flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
- (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
+ (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
+
+ /*
+ * Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
+ *
+ * This also assumes IPv4 only. IPv6 masking isn't supported at this
+ * point in time.
+ */
+ if (src_ipv4 == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
+
+ if (dst_ipv4 == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
+
+ switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
+ case IXGBE_ATR_L4TYPE_TCP:
+ if (src_port == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
+ input_masks->src_port_mask);
+
+ if (dst_port == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
+ (0xffff << 16)));
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
+ (input_masks->dst_port_mask << 16)));
+ break;
+ case IXGBE_ATR_L4TYPE_UDP:
+ if (src_port == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
+ input_masks->src_port_mask);
+
+ if (dst_port == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
+ (0xffff << 16)));
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
+ (input_masks->dst_port_mask << 16)));
+ break;
+ default:
+ /* this already would have failed above */
+ break;
+ }
+
+ /* Program the last mask register, FDIRM */
+ if (input_masks->vlan_id_mask || !vlan_id)
+ /* Mask both VLAN and VLANP - bits 0 and 1 */
+ fdirm |= 0x3;
+
+ if (input_masks->data_mask || !flex_bytes)
+ /* Flex bytes need masking, so mask the whole thing - bit 4 */
+ fdirm |= 0x10;
+
+ /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+ fdirm |= 0x24;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
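
The policy spelled out in the comment block above — a zero value in a filter field means "unspecified", so the corresponding mask register gets a full mask, otherwise the user-supplied mask is written as-is — can be captured in a tiny helper. Illustrative sketch only, with the register write left to the caller:

#include <stdint.h>

/* Pick the mask to program for one perfect-filter field: a zero field value
 * is treated as "match anything" and fully masked; otherwise the caller's
 * mask is used verbatim (e.g. 0xffffffff as full_mask for an IPv4 address,
 * 0xffff for a port). */
static uint32_t fdir_field_mask(uint32_t value, uint32_t user_mask,
				uint32_t full_mask)
{
	return value ? user_mask : full_mask;
}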
@@ -2655,4 +2735,5 @@ struct ixgbe_info ixgbe_82599_info = {
.mac_ops = &mac_ops_82599,
.eeprom_ops = &eeprom_ops_82599,
.phy_ops = &phy_ops_82599,
+ .mbx_ops = &mbx_ops_82599,
};
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 21f158f79dd0..eb49020903c1 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -28,7 +28,6 @@
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>
-#include <linux/list.h>
#include <linux/netdevice.h>
#include "ixgbe.h"
@@ -1278,19 +1277,11 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
/* Get the MAC address from the RAR0 for later reference */
hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
- hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
- hw->mac.addr[0], hw->mac.addr[1],
- hw->mac.addr[2]);
- hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
- hw->mac.addr[4], hw->mac.addr[5]);
+ hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
} else {
/* Setup the receive address. */
hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
- hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ",
- hw->mac.addr[0], hw->mac.addr[1],
- hw->mac.addr[2]);
- hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
- hw->mac.addr[4], hw->mac.addr[5]);
+ hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}
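
Both hunks above swap the hand-rolled byte-by-byte MAC printing for the kernel's %pM printf extension, which renders a 6-byte array as a colon-separated MAC address in a single conversion. A minimal kernel-side sketch (pr_debug stands in for hw_dbg, which is just a printk wrapper):

#include <linux/kernel.h>

/* %pM prints the 6 bytes at @addr as "xx:xx:xx:xx:xx:xx", replacing the
 * six separate %.2X conversions the old code split across two calls. */
static void example_print_mac(const u8 *addr)
{
	pr_debug("MAC Addr = %pM\n", addr);
}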
@@ -1355,7 +1346,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
/**
* ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
* @hw: pointer to hardware structure
- * @uc_list: the list of new addresses
+ * @netdev: pointer to net device structure
*
* The given list replaces any existing list. Clears the secondary addrs from
* receive address registers. Uses unused receive address registers for the
@@ -1365,7 +1356,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
* manually putting the device into promiscuous mode.
**/
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
- struct list_head *uc_list)
+ struct net_device *netdev)
{
u32 i;
u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
@@ -1389,7 +1380,7 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
}
/* Add the new addresses */
- list_for_each_entry(ha, uc_list, list) {
+ netdev_for_each_uc_addr(ha, netdev) {
hw_dbg(hw, " Adding the secondary addresses:\n");
ixgbe_add_uc_addr(hw, ha->addr, 0);
}
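
The hunk above moves the secondary-unicast walk off the raw list_head and onto netdev_for_each_uc_addr(), the iterator over the net_device unicast address list. A hedged sketch of the new calling pattern; program_one_addr() is a made-up hook standing in for ixgbe_add_uc_addr():

#include <linux/netdevice.h>

/* Walk @netdev's unicast address list and hand each MAC to a programming
 * helper; struct netdev_hw_addr is the element type the iterator yields. */
static void example_program_uc_addrs(struct net_device *netdev,
				     void (*program_one_addr)(const u8 *mac))
{
	struct netdev_hw_addr *ha;

	netdev_for_each_uc_addr(ha, netdev)
		program_one_addr(ha->addr);
}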
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index dfff0ffaa502..13606d4809c9 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -60,7 +60,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count,
ixgbe_mc_addr_itr func);
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
- struct list_head *uc_list);
+ struct net_device *netdev);
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index d77961fc75f9..0d234346a4ea 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -834,8 +834,8 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_ring *tx_ring = adapter->tx_ring;
- struct ixgbe_ring *rx_ring = adapter->rx_ring;
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
ring->rx_max_pending = IXGBE_MAX_RXD;
ring->tx_max_pending = IXGBE_MAX_TXD;
@@ -867,8 +867,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
- if ((new_tx_count == adapter->tx_ring->count) &&
- (new_rx_count == adapter->rx_ring->count)) {
+ if ((new_tx_count == adapter->tx_ring[0]->count) &&
+ (new_rx_count == adapter->rx_ring[0]->count)) {
/* nothing to do */
return 0;
}
@@ -878,25 +878,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].count = new_tx_count;
+ adapter->tx_ring[i]->count = new_tx_count;
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].count = new_rx_count;
+ adapter->rx_ring[i]->count = new_rx_count;
adapter->tx_ring_count = new_tx_count;
adapter->rx_ring_count = new_rx_count;
- goto err_setup;
+ goto clear_reset;
}
- temp_tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
+ temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
if (!temp_tx_ring) {
err = -ENOMEM;
- goto err_setup;
+ goto clear_reset;
}
if (new_tx_count != adapter->tx_ring_count) {
- memcpy(temp_tx_ring, adapter->tx_ring,
- adapter->num_tx_queues * sizeof(struct ixgbe_ring));
for (i = 0; i < adapter->num_tx_queues; i++) {
+ memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
+ sizeof(struct ixgbe_ring));
temp_tx_ring[i].count = new_tx_count;
err = ixgbe_setup_tx_resources(adapter,
&temp_tx_ring[i]);
@@ -904,28 +903,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
while (i) {
i--;
ixgbe_free_tx_resources(adapter,
- &temp_tx_ring[i]);
+ &temp_tx_ring[i]);
}
- goto err_setup;
+ goto clear_reset;
}
}
need_update = true;
}
- temp_rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
- if ((!temp_rx_ring) && (need_update)) {
- for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]);
- kfree(temp_tx_ring);
+ temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
+ if (!temp_rx_ring) {
err = -ENOMEM;
goto err_setup;
}
if (new_rx_count != adapter->rx_ring_count) {
- memcpy(temp_rx_ring, adapter->rx_ring,
- adapter->num_rx_queues * sizeof(struct ixgbe_ring));
for (i = 0; i < adapter->num_rx_queues; i++) {
+ memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
+ sizeof(struct ixgbe_ring));
temp_rx_ring[i].count = new_rx_count;
err = ixgbe_setup_rx_resources(adapter,
&temp_rx_ring[i]);
@@ -947,22 +942,32 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
/* tx */
if (new_tx_count != adapter->tx_ring_count) {
- kfree(adapter->tx_ring);
- adapter->tx_ring = temp_tx_ring;
- temp_tx_ring = NULL;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ixgbe_free_tx_resources(adapter,
+ adapter->tx_ring[i]);
+ memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
+ sizeof(struct ixgbe_ring));
+ }
adapter->tx_ring_count = new_tx_count;
}
/* rx */
if (new_rx_count != adapter->rx_ring_count) {
- kfree(adapter->rx_ring);
- adapter->rx_ring = temp_rx_ring;
- temp_rx_ring = NULL;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ ixgbe_free_rx_resources(adapter,
+ adapter->rx_ring[i]);
+ memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
+ sizeof(struct ixgbe_ring));
+ }
adapter->rx_ring_count = new_rx_count;
}
ixgbe_up(adapter);
}
+
+ vfree(temp_rx_ring);
err_setup:
+ vfree(temp_tx_ring);
+clear_reset:
clear_bit(__IXGBE_RESETTING, &adapter->state);
return err;
}
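
With the rings now allocated individually, the reworked ixgbe_set_ringparam() above follows a prepare-then-swap pattern: build the resized rings in a vmalloc'd temporary array, and only copy them over the live per-queue structures (freeing the old resources) once every allocation has succeeded. A generic sketch of that pattern, independent of the driver's types and allocators:

#include <stdlib.h>

struct ring { unsigned int count; void *desc; };

/* Resize @num rings to @new_count entries each.  New resources are prepared
 * in a temporary array first; the live rings are only touched once every
 * allocation has succeeded.  Returns 0 on success, -1 on failure. */
static int resize_rings(struct ring **live, int num, unsigned int new_count)
{
	struct ring *tmp = calloc(num, sizeof(*tmp));
	int i;

	if (!tmp)
		return -1;

	for (i = 0; i < num; i++) {
		tmp[i] = *live[i];		/* start from the current config    */
		tmp[i].count = new_count;
		tmp[i].desc = malloc(new_count);/* stands in for setup_*_resources  */
		if (!tmp[i].desc)
			goto undo;
	}

	for (i = 0; i < num; i++) {
		free(live[i]->desc);		/* stands in for free_*_resources   */
		*live[i] = tmp[i];
	}
	free(tmp);
	return 0;

undo:
	while (i--)
		free(tmp[i].desc);
	free(tmp);
	return -1;
}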
@@ -974,6 +979,9 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
return IXGBE_TEST_LEN;
case ETH_SS_STATS:
return IXGBE_STATS_LEN;
+ case ETH_SS_NTUPLE_FILTERS:
+ return (ETHTOOL_MAX_NTUPLE_LIST_ENTRY *
+ ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY);
default:
return -EOPNOTSUPP;
}
@@ -1007,13 +1015,13 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
for (j = 0; j < adapter->num_tx_queues; j++) {
- queue_stat = (u64 *)&adapter->tx_ring[j].stats;
+ queue_stat = (u64 *)&adapter->tx_ring[j]->stats;
for (k = 0; k < stat_count; k++)
data[i + k] = queue_stat[k];
i += k;
}
for (j = 0; j < adapter->num_rx_queues; j++) {
- queue_stat = (u64 *)&adapter->rx_ring[j].stats;
+ queue_stat = (u64 *)&adapter->rx_ring[j]->stats;
for (k = 0; k < stat_count; k++)
data[i + k] = queue_stat[k];
i += k;
@@ -1627,7 +1635,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
reg_data |= IXGBE_RXDCTL_ENABLE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- int j = adapter->rx_ring[0].reg_idx;
+ int j = adapter->rx_ring[0]->reg_idx;
u32 k;
for (k = 0; k < 10; k++) {
if (IXGBE_READ_REG(&adapter->hw,
@@ -1867,11 +1875,22 @@ static void ixgbe_diag_test(struct net_device *netdev,
if (ixgbe_intr_test(adapter, &data[2]))
eth_test->flags |= ETH_TEST_FL_FAILED;
+ /* If SRIOV or VMDq is enabled then skip MAC
+ * loopback diagnostic. */
+ if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
+ IXGBE_FLAG_VMDQ_ENABLED)) {
+ DPRINTK(HW, INFO, "Skip MAC loopback diagnostic in VT "
+ "mode\n");
+ data[3] = 0;
+ goto skip_loopback;
+ }
+
ixgbe_reset(adapter);
DPRINTK(HW, INFO, "loopback testing starting\n");
if (ixgbe_loopback_test(adapter, &data[3]))
eth_test->flags |= ETH_TEST_FL_FAILED;
+skip_loopback:
ixgbe_reset(adapter);
clear_bit(__IXGBE_TESTING, &adapter->state);
@@ -2000,7 +2019,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
+ ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit;
/* only valid if in constant ITR mode */
switch (adapter->rx_itr_setting) {
@@ -2053,7 +2072,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
return -EINVAL;
if (ec->tx_max_coalesced_frames_irq)
- adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
+ adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
if (ec->rx_coalesce_usecs > 1) {
/* check the limits */
@@ -2134,23 +2153,124 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
static int ixgbe_set_flags(struct net_device *netdev, u32 data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ bool need_reset = false;
ethtool_op_set_flags(netdev, data);
- if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
- return 0;
-
/* if state changes we need to update adapter->flags and reset */
if ((!!(data & ETH_FLAG_LRO)) !=
(!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
+ need_reset = true;
+ }
+
+ /*
+ * Check if Flow Director n-tuple support was enabled or disabled. If
+ * the state changed, we need to reset.
+ */
+ if ((adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) &&
+ (!(data & ETH_FLAG_NTUPLE))) {
+ /* turn off Flow Director perfect, set hash and reset */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ need_reset = true;
+ } else if ((!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) &&
+ (data & ETH_FLAG_NTUPLE)) {
+ /* turn off Flow Director hash, enable perfect and reset */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ need_reset = true;
+ } else {
+ /* no state change */
+ }
+
+ if (need_reset) {
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
else
ixgbe_reset(adapter);
}
+
return 0;
+}
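
The rewritten ixgbe_set_flags() above folds every feature transition into one need_reset flag and reinitializes the device once at the end instead of returning early per feature. A compact sketch of that accumulate-then-reset shape (flag names are placeholders, not driver defines):

#include <stdbool.h>
#include <stdint.h>

#define F_LRO		(1u << 0)	/* placeholder feature bits */
#define F_NTUPLE	(1u << 1)

/* Apply the requested feature bits and report whether a (single) device
 * reset is needed; the caller performs the reset after all toggles. */
static bool apply_feature_flags(uint32_t *flags, uint32_t requested)
{
	uint32_t changed = *flags ^ requested;

	*flags = requested;
	return changed & (F_LRO | F_NTUPLE);
}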
+static int ixgbe_set_rx_ntuple(struct net_device *dev,
+ struct ethtool_rx_ntuple *cmd)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ethtool_rx_ntuple_flow_spec fs = cmd->fs;
+ struct ixgbe_atr_input input_struct;
+ struct ixgbe_atr_input_masks input_masks;
+ int target_queue;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ return -EOPNOTSUPP;
+
+ /*
+ * Don't allow programming if the action is a queue greater than
+ * the number of online Tx queues.
+ */
+ if ((fs.action >= adapter->num_tx_queues) ||
+ (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP))
+ return -EINVAL;
+
+ memset(&input_struct, 0, sizeof(struct ixgbe_atr_input));
+ memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
+
+ input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src;
+ input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst;
+ input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc;
+ input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst;
+ input_masks.vlan_id_mask = fs.vlan_tag_mask;
+ /* only use the lowest 2 bytes for flex bytes */
+ input_masks.data_mask = (fs.data_mask & 0xffff);
+
+ switch (fs.flow_type) {
+ case TCP_V4_FLOW:
+ ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP);
+ break;
+ case UDP_V4_FLOW:
+ ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP);
+ break;
+ case SCTP_V4_FLOW:
+ ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Mask bits from the inputs based on user-supplied mask */
+ ixgbe_atr_set_src_ipv4_82599(&input_struct,
+ (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src));
+ ixgbe_atr_set_dst_ipv4_82599(&input_struct,
+ (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst));
+ /* 82599 expects these to be byte-swapped for perfect filtering */
+ ixgbe_atr_set_src_port_82599(&input_struct,
+ ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc));
+ ixgbe_atr_set_dst_port_82599(&input_struct,
+ ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst));
+
+ /* VLAN and Flex bytes are either completely masked or not */
+ if (!fs.vlan_tag_mask)
+ ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag);
+
+ if (!input_masks.data_mask)
+ /* make sure we only use the first 2 bytes of user data */
+ ixgbe_atr_set_flex_byte_82599(&input_struct,
+ (fs.data & 0xffff));
+
+ /* determine if we need to drop or route the packet */
+ if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+ target_queue = MAX_RX_QUEUES - 1;
+ else
+ target_queue = fs.action;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+ ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct,
+ &input_masks, 0, target_queue);
+ spin_unlock(&adapter->fdir_perfect_lock);
+
+ return 0;
}
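
ixgbe_set_rx_ntuple() above consumes a struct ethtool_rx_ntuple whose flow_spec carries the match values, their masks (set mask bits mean "ignore this field"), and the action — a queue index or ETHTOOL_RXNTUPLE_ACTION_DROP. A hedged sketch of how a caller might fill one for a TCP/IPv4 destination-port filter; field names follow the ethtool UAPI this patch programs against, and the values are examples only, assuming the device exposes at least four queues:

#include <linux/ethtool.h>
#include <string.h>
#include <arpa/inet.h>

/* Steer TCP/IPv4 traffic with destination port 80 to Rx queue 3.  All-ones
 * mask fields mark everything except the destination port as "don't care". */
static void fill_ntuple_example(struct ethtool_rx_ntuple *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->cmd = ETHTOOL_SRXNTUPLE;
	cmd->fs.flow_type = TCP_V4_FLOW;
	cmd->fs.h_u.tcp_ip4_spec.pdst = htons(80);	/* match dest port 80 */
	cmd->fs.m_u.tcp_ip4_spec.ip4src = 0xffffffff;	/* any source IP      */
	cmd->fs.m_u.tcp_ip4_spec.ip4dst = 0xffffffff;	/* any dest IP        */
	cmd->fs.m_u.tcp_ip4_spec.psrc = 0xffff;		/* any source port    */
	cmd->fs.vlan_tag_mask = 0xffff;			/* any VLAN           */
	cmd->fs.data_mask = ~0ULL;			/* any flex data      */
	cmd->fs.action = 3;				/* direct to queue 3  */
}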
static const struct ethtool_ops ixgbe_ethtool_ops = {
@@ -2188,6 +2308,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_coalesce = ixgbe_set_coalesce,
.get_flags = ethtool_op_get_flags,
.set_flags = ixgbe_set_flags,
+ .set_rx_ntuple = ixgbe_set_rx_ntuple,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index e9a20c88c155..4123dec0dfb7 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -525,7 +525,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
fcoe_i = f->mask + i % f->indices;
fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
- fcoe_q = adapter->rx_ring[fcoe_i].reg_idx;
+ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
}
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
@@ -533,7 +533,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
} else {
/* Use single rx queue for FCoE */
fcoe_i = f->mask;
- fcoe_q = adapter->rx_ring[fcoe_i].reg_idx;
+ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
IXGBE_ETQS_QUEUE_EN |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 951b73cf5ca2..43a8de3dc4d6 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -45,12 +45,13 @@
#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
+#include "ixgbe_sriov.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
-#define DRV_VERSION "2.0.44-k2"
+#define DRV_VERSION "2.0.62-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
@@ -67,7 +68,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id ixgbe_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
@@ -124,6 +125,13 @@ static struct notifier_block dca_notifier = {
};
#endif
+#ifdef CONFIG_PCI_IOV
+static unsigned int max_vfs;
+module_param(max_vfs, uint, 0);
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
+ "per physical function");
+#endif /* CONFIG_PCI_IOV */
+
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
@@ -131,6 +139,41 @@ MODULE_VERSION(DRV_VERSION);
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gcr;
+ u32 gpie;
+ u32 vmdctl;
+
+#ifdef CONFIG_PCI_IOV
+ /* disable iov and allow time for transactions to clear */
+ pci_disable_sriov(adapter->pdev);
+#endif
+
+ /* turn off device IOV mode */
+ gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ gcr &= ~(IXGBE_GCR_EXT_SRIOV);
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+ /* set default pool back to 0 */
+ vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+
+ /* take a breather then clean up driver data */
+ msleep(100);
+ kfree(adapter->vfinfo);
+ adapter->vfinfo = NULL;
+
+ adapter->num_vfs = 0;
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+}
+
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
u32 ctrl_ext;
@@ -451,7 +494,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
{
u32 rxctrl;
int cpu = get_cpu();
- int q = rx_ring - adapter->rx_ring;
+ int q = rx_ring->reg_idx;
if (rx_ring->cpu != cpu) {
rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
@@ -479,7 +522,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
{
u32 txctrl;
int cpu = get_cpu();
- int q = tx_ring - adapter->tx_ring;
+ int q = tx_ring->reg_idx;
struct ixgbe_hw *hw = &adapter->hw;
if (tx_ring->cpu != cpu) {
@@ -513,12 +556,12 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i].cpu = -1;
- ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
+ adapter->tx_ring[i]->cpu = -1;
+ ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i].cpu = -1;
- ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
+ adapter->rx_ring[i]->cpu = -1;
+ ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
}
}
@@ -989,7 +1032,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- j = adapter->rx_ring[r_idx].reg_idx;
+ j = adapter->rx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 0, j, v_idx);
r_idx = find_next_bit(q_vector->rxr_idx,
adapter->num_rx_queues,
@@ -999,7 +1042,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- j = adapter->tx_ring[r_idx].reg_idx;
+ j = adapter->tx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 1, j, v_idx);
r_idx = find_next_bit(q_vector->txr_idx,
adapter->num_tx_queues,
@@ -1025,7 +1068,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
/* set up to autoclear timer, and the vectors */
mask = IXGBE_EIMS_ENABLE_MASK;
- mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
+ if (adapter->num_vfs)
+ mask &= ~(IXGBE_EIMS_OTHER |
+ IXGBE_EIMS_MAILBOX |
+ IXGBE_EIMS_LSC);
+ else
+ mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
@@ -1134,7 +1182,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring = adapter->tx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
q_vector->tx_itr,
tx_ring->total_packets,
@@ -1149,7 +1197,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring = adapter->rx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
q_vector->rx_itr,
rx_ring->total_packets,
@@ -1254,6 +1302,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
if (eicr & IXGBE_EICR_LSC)
ixgbe_check_lsc(adapter);
+ if (eicr & IXGBE_EICR_MAILBOX)
+ ixgbe_msg_task(adapter);
+
if (hw->mac.type == ixgbe_mac_82598EB)
ixgbe_check_fan_failure(adapter, eicr);
@@ -1268,7 +1319,7 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
netif_tx_stop_all_queues(netdev);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *tx_ring =
- &adapter->tx_ring[i];
+ adapter->tx_ring[i];
if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
&tx_ring->reinit_state))
schedule_work(&adapter->fdir_reinit_task);
@@ -1327,7 +1378,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring = adapter->tx_ring[r_idx];
tx_ring->total_bytes = 0;
tx_ring->total_packets = 0;
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
@@ -1355,7 +1406,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring = adapter->rx_ring[r_idx];
rx_ring->total_bytes = 0;
rx_ring->total_packets = 0;
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
@@ -1385,7 +1436,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- ring = &(adapter->tx_ring[r_idx]);
+ ring = adapter->tx_ring[r_idx];
ring->total_bytes = 0;
ring->total_packets = 0;
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
@@ -1394,7 +1445,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- ring = &(adapter->rx_ring[r_idx]);
+ ring = adapter->rx_ring[r_idx];
ring->total_bytes = 0;
ring->total_packets = 0;
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
@@ -1425,7 +1476,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
long r_idx;
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring = adapter->rx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_rx_dca(adapter, rx_ring);
@@ -1466,7 +1517,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- ring = &(adapter->tx_ring[r_idx]);
+ ring = adapter->tx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_tx_dca(adapter, ring);
@@ -1482,7 +1533,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
budget = max(budget, 1);
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- ring = &(adapter->rx_ring[r_idx]);
+ ring = adapter->rx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_rx_dca(adapter, ring);
@@ -1493,7 +1544,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
}
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- ring = &(adapter->rx_ring[r_idx]);
+ ring = adapter->rx_ring[r_idx];
/* If all Rx work done, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
@@ -1526,7 +1577,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
long r_idx;
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring = adapter->tx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_tx_dca(adapter, tx_ring);
@@ -1711,8 +1762,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
u8 current_itr;
u32 new_itr = q_vector->eitr;
- struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
- struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
q_vector->tx_itr,
@@ -1768,6 +1819,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
mask |= IXGBE_EIMS_ECC;
mask |= IXGBE_EIMS_GPI_SDP1;
mask |= IXGBE_EIMS_GPI_SDP2;
+ if (adapter->num_vfs)
+ mask |= IXGBE_EIMS_MAILBOX;
}
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -1776,6 +1829,11 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
ixgbe_irq_enable_queues(adapter, ~0);
IXGBE_WRITE_FLUSH(&adapter->hw);
+
+ if (adapter->num_vfs > 32) {
+ u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
+ }
}
/**
@@ -1817,10 +1875,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_fan_failure(adapter, eicr);
if (napi_schedule_prep(&(q_vector->napi))) {
- adapter->tx_ring[0].total_packets = 0;
- adapter->tx_ring[0].total_bytes = 0;
- adapter->rx_ring[0].total_packets = 0;
- adapter->rx_ring[0].total_bytes = 0;
+ adapter->tx_ring[0]->total_packets = 0;
+ adapter->tx_ring[0]->total_bytes = 0;
+ adapter->rx_ring[0]->total_packets = 0;
+ adapter->rx_ring[0]->total_bytes = 0;
/* would disable interrupts here but EIAM disabled it */
__napi_schedule(&(q_vector->napi));
}
@@ -1905,6 +1963,8 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
+ if (adapter->num_vfs > 32)
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
}
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1950,7 +2010,7 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
/* Setup the HW Tx Head and Tail descriptor pointers */
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *ring = &adapter->tx_ring[i];
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
j = ring->reg_idx;
tdba = ring->dma;
tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
@@ -1960,8 +2020,8 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
- adapter->tx_ring[i].head = IXGBE_TDH(j);
- adapter->tx_ring[i].tail = IXGBE_TDT(j);
+ adapter->tx_ring[i]->head = IXGBE_TDH(j);
+ adapter->tx_ring[i]->tail = IXGBE_TDT(j);
/*
* Disable Tx Head Writeback RO bit, since this hoses
* bookkeeping if things aren't delivered in order.
@@ -1989,18 +2049,32 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
if (hw->mac.type == ixgbe_mac_82599EB) {
u32 rttdcs;
+ u32 mask;
/* disable the arbiter while setting MTQC */
rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
rttdcs |= IXGBE_RTTDCS_ARBDIS;
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
- /* We enable 8 traffic classes, DCB only */
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
- IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
- IXGBE_MTQC_8TC_8TQ));
- else
+ /* set transmit pool layout */
+ mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
+ switch (adapter->flags & mask) {
+
+ case (IXGBE_FLAG_SRIOV_ENABLED):
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+ (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
+ break;
+
+ case (IXGBE_FLAG_DCB_ENABLED):
+ /* We enable 8 traffic classes, DCB only */
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+ (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
+ break;
+
+ default:
IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+ break;
+ }
/* re-enable the arbiter */
rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
@@ -2059,12 +2133,16 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
#ifdef CONFIG_IXGBE_DCB
| IXGBE_FLAG_DCB_ENABLED
#endif
+ | IXGBE_FLAG_SRIOV_ENABLED
);
switch (mask) {
case (IXGBE_FLAG_RSS_ENABLED):
mrqc = IXGBE_MRQC_RSSEN;
break;
+ case (IXGBE_FLAG_SRIOV_ENABLED):
+ mrqc = IXGBE_MRQC_VMDQEN;
+ break;
#ifdef CONFIG_IXGBE_DCB
case (IXGBE_FLAG_DCB_ENABLED):
mrqc = IXGBE_MRQC_RT8TCEN;
@@ -2090,7 +2168,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
u32 rscctrl;
int rx_buf_len;
- rx_ring = &adapter->rx_ring[index];
+ rx_ring = adapter->rx_ring[index];
j = rx_ring->reg_idx;
rx_buf_len = rx_ring->rx_buf_len;
rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
@@ -2145,7 +2223,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
int rx_buf_len;
/* Decide whether to use packet split mode or not */
- adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ /* Do not use packet split if we're in SR-IOV Mode */
+ if (!adapter->num_vfs)
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
/* Set the RX buffer length according to the mode */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -2157,7 +2237,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
IXGBE_PSRTYPE_IPV4HDR |
IXGBE_PSRTYPE_IPV6HDR |
IXGBE_PSRTYPE_L2HDR;
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PSRTYPE(adapter->num_vfs),
+ psrtype);
}
} else {
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
@@ -2184,7 +2266,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
#endif
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
- rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
+ rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc);
/* disable receives while setting up the descriptors */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -2194,7 +2276,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
* the Base and Length of the Rx Descriptor Ring
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_ring = &adapter->rx_ring[i];
+ rx_ring = adapter->rx_ring[i];
rdba = rx_ring->dma;
j = rx_ring->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
@@ -2243,6 +2325,30 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ u32 vt_reg_bits;
+ u32 reg_offset, vf_shift;
+ u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
+ | IXGBE_VT_CTL_REPLEN;
+ vt_reg_bits |= (adapter->num_vfs <<
+ IXGBE_VT_CTL_POOL_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
+
+ vf_shift = adapter->num_vfs % 32;
+ reg_offset = adapter->num_vfs / 32;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
+ /* Enable only the PF's pool for Tx/Rx */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+ ixgbe_set_vmolr(hw, adapter->num_vfs);
+ }
+
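
The SR-IOV block above enables receive and transmit only for the PF's own pool: pool number adapter->num_vfs (the first pool after all VFs) is split into a VFRE/VFTE register offset and a bit inside that 32-bit register. The index math in isolation:

#include <stdint.h>

/* Split a pool index into the VFRE/VFTE register offset and the bit within
 * that 32-bit register, as done for the PF pool above. */
static void pool_to_reg_bit(unsigned int pool,
			    unsigned int *reg_offset, uint32_t *bit)
{
	*reg_offset = pool / 32;	/* which VFRE/VFTE register */
	*bit = 1u << (pool % 32);	/* which bit inside it      */
}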
/* Program MRQC for the distribution of queues */
mrqc = ixgbe_setup_mrqc(adapter);
@@ -2274,6 +2380,20 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
}
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ if (adapter->num_vfs) {
+ u32 reg;
+
+ /* Map PF MAC address in RAR Entry 0 to first pool
+ * following VFs */
+ hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+
+ /* Set up VF register offsets for selected VT Mode, i.e.
+ * 64 VFs for SR-IOV */
+ reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ reg |= IXGBE_GCR_EXT_SRIOV;
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
+ }
+
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
@@ -2312,15 +2432,17 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ int pool_ndx = adapter->num_vfs;
/* add VID to filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
+ hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
}
static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ int pool_ndx = adapter->num_vfs;
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_disable(adapter);
@@ -2331,7 +2453,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
ixgbe_irq_enable(adapter);
/* remove VID from filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
+ hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
}
static void ixgbe_vlan_rx_register(struct net_device *netdev,
@@ -2361,7 +2483,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
for (i = 0; i < adapter->num_rx_queues; i++) {
u32 ctrl;
- j = adapter->rx_ring[i].reg_idx;
+ j = adapter->rx_ring[i]->reg_idx;
ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
ctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
@@ -2414,7 +2536,7 @@ static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
* responsible for configuring the hardware for proper unicast, multicast and
* promiscuous mode.
**/
-static void ixgbe_set_rx_mode(struct net_device *netdev)
+void ixgbe_set_rx_mode(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -2446,14 +2568,16 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
/* reprogram secondary unicast list */
- hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list);
+ hw->mac.ops.update_uc_addr_list(hw, netdev);
/* reprogram multicast list */
- addr_count = netdev->mc_count;
+ addr_count = netdev_mc_count(netdev);
if (addr_count)
addr_list = netdev->mc_list->dmi_addr;
hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
ixgbe_addr_list_itr);
+ if (adapter->num_vfs)
+ ixgbe_restore_vf_multicasts(adapter);
}
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -2522,7 +2646,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
/* PThresh workaround for Tx hang with DFP enabled. */
txdctl |= 32;
@@ -2539,7 +2663,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
for (i = 0; i < adapter->num_rx_queues; i++) {
- j = adapter->rx_ring[i].reg_idx;
+ j = adapter->rx_ring[i]->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@ -2579,7 +2703,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].atr_sample_rate =
+ adapter->tx_ring[i]->atr_sample_rate =
adapter->atr_sample_rate;
ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
@@ -2589,8 +2713,8 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_configure_tx(adapter);
ixgbe_configure_rx(adapter);
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
- (adapter->rx_ring[i].count - 1));
+ ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i],
+ (adapter->rx_ring[i]->count - 1));
}
static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
@@ -2673,7 +2797,7 @@ link_cfg_out:
static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
int rxr)
{
- int j = adapter->rx_ring[rxr].reg_idx;
+ int j = adapter->rx_ring[rxr]->reg_idx;
int k;
for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
@@ -2687,8 +2811,8 @@ static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
"not set within the polling period\n", rxr);
}
- ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
- (adapter->rx_ring[rxr].count - 1));
+ ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
+ (adapter->rx_ring[rxr]->count - 1));
}
static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
@@ -2702,6 +2826,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
u32 txdctl, rxdctl, mhadd;
u32 dmatxctl;
u32 gpie;
+ u32 ctrl_ext;
ixgbe_get_hw_control(adapter);
@@ -2714,6 +2839,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
/* MSI only */
gpie = 0;
}
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ gpie |= IXGBE_GPIE_VTMODE_64;
+ }
/* XXX: to interrupt immediately for EICS writes, enable this */
/* gpie |= IXGBE_GPIE_EIMEN; */
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -2770,7 +2899,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
/* enable WTHRESH=8 descriptors, to encourage burst writeback */
txdctl |= (8 << 16);
@@ -2784,14 +2913,26 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
txdctl |= IXGBE_TXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ int wait_loop = 10;
+ /* poll for Tx Enable ready */
+ do {
+ msleep(1);
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
+ } while (--wait_loop &&
+ !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!wait_loop)
+ DPRINTK(DRV, ERR, "Could not enable "
+ "Tx Queue %d\n", j);
+ }
}
for (i = 0; i < num_rx_rings; i++) {
- j = adapter->rx_ring[i].reg_idx;
+ j = adapter->rx_ring[i]->reg_idx;
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
/* enable PTHRESH=32 descriptors (half the internal cache)
* and HTHRESH=0 descriptors (to minimize latency on fetch),
@@ -2865,7 +3006,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i].reinit_state));
+ &(adapter->tx_ring[i]->reinit_state));
/* enable transmits */
netif_tx_start_all_queues(netdev);
@@ -2875,6 +3016,12 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
adapter->link_check_timeout = jiffies;
mod_timer(&adapter->watchdog_timer, jiffies);
+
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
return 0;
}
@@ -2923,7 +3070,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
}
/* reprogram the RAR[0] in case user changed it. */
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+ IXGBE_RAH_AV);
}
/**
@@ -3029,7 +3177,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+ ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
}
/**
@@ -3041,7 +3189,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+ ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
}
void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -3055,6 +3203,17 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* signal that we are down to the interrupt handler */
set_bit(__IXGBE_DOWN, &adapter->state);
+ /* disable receive for all VFs and wait one second */
+ if (adapter->num_vfs) {
+ for (i = 0 ; i < adapter->num_vfs; i++)
+ adapter->vfinfo[i].clear_to_send = 0;
+
+ /* ping all the active vfs to let them know we are going down */
+ ixgbe_ping_all_vfs(adapter);
+ /* Disable all VFTE/VFRE TX/RX */
+ ixgbe_disable_tx_rx(adapter);
+ }
+
/* disable receives */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -3081,7 +3240,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
(txdctl & ~IXGBE_TXDCTL_ENABLE));
@@ -3094,6 +3253,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
netif_carrier_off(netdev);
+ /* clear n-tuple filters that are cached */
+ ethtool_ntuple_flush(netdev);
+
if (!pci_channel_offline(adapter->pdev))
ixgbe_reset(adapter);
ixgbe_clean_all_tx_rings(adapter);
@@ -3121,13 +3283,13 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
- ixgbe_update_tx_dca(adapter, adapter->tx_ring);
- ixgbe_update_rx_dca(adapter, adapter->rx_ring);
+ ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
+ ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
}
#endif
- tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
- ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
+ tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
+ ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
if (!tx_clean_complete)
work_done = budget;
@@ -3291,6 +3453,19 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
}
#endif /* IXGBE_FCOE */
+/**
+ * ixgbe_set_sriov_queues: Allocate queues for IOV use
+ * @adapter: board private structure to initialize
+ *
+ * SR-IOV doesn't need any queues of its own, so just NAK the
+ * request for now and let the other queue-mapping routines
+ * figure out what to do.
+ */
+static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
+{
+ return false;
+}
+
/*
* ixgbe_set_num_queues: Allocate queues for device, feature dependent
* @adapter: board private structure to initialize
@@ -3304,6 +3479,15 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
**/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
+ /* Start with base case */
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_queues_per_pool = 1;
+
+ if (ixgbe_set_sriov_queues(adapter))
+ return;
+
#ifdef IXGBE_FCOE
if (ixgbe_set_fcoe_queues(adapter))
goto done;
@@ -3393,9 +3577,9 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].reg_idx = i;
+ adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].reg_idx = i;
+ adapter->tx_ring[i]->reg_idx = i;
ret = true;
} else {
ret = false;
@@ -3422,8 +3606,8 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
/* the number of queues is assumed to be symmetric */
for (i = 0; i < dcb_i; i++) {
- adapter->rx_ring[i].reg_idx = i << 3;
- adapter->tx_ring[i].reg_idx = i << 2;
+ adapter->rx_ring[i]->reg_idx = i << 3;
+ adapter->tx_ring[i]->reg_idx = i << 2;
}
ret = true;
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
@@ -3441,18 +3625,18 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
* Rx TC0-TC7 are offset by 16 queues each
*/
for (i = 0; i < 3; i++) {
- adapter->tx_ring[i].reg_idx = i << 5;
- adapter->rx_ring[i].reg_idx = i << 4;
+ adapter->tx_ring[i]->reg_idx = i << 5;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
for ( ; i < 5; i++) {
- adapter->tx_ring[i].reg_idx =
+ adapter->tx_ring[i]->reg_idx =
((i + 2) << 4);
- adapter->rx_ring[i].reg_idx = i << 4;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
for ( ; i < dcb_i; i++) {
- adapter->tx_ring[i].reg_idx =
+ adapter->tx_ring[i]->reg_idx =
((i + 8) << 3);
- adapter->rx_ring[i].reg_idx = i << 4;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
ret = true;
@@ -3465,12 +3649,12 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
*
* Rx TC0-TC3 are offset by 32 queues each
*/
- adapter->tx_ring[0].reg_idx = 0;
- adapter->tx_ring[1].reg_idx = 64;
- adapter->tx_ring[2].reg_idx = 96;
- adapter->tx_ring[3].reg_idx = 112;
+ adapter->tx_ring[0]->reg_idx = 0;
+ adapter->tx_ring[1]->reg_idx = 64;
+ adapter->tx_ring[2]->reg_idx = 96;
+ adapter->tx_ring[3]->reg_idx = 112;
for (i = 0 ; i < dcb_i; i++)
- adapter->rx_ring[i].reg_idx = i << 5;
+ adapter->rx_ring[i]->reg_idx = i << 5;
ret = true;
} else {
@@ -3503,9 +3687,9 @@ static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].reg_idx = i;
+ adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].reg_idx = i;
+ adapter->tx_ring[i]->reg_idx = i;
ret = true;
}
@@ -3533,8 +3717,8 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
ixgbe_cache_ring_dcb(adapter);
/* find out queues in TC for FCoE */
- fcoe_rx_i = adapter->rx_ring[fcoe->tc].reg_idx + 1;
- fcoe_tx_i = adapter->tx_ring[fcoe->tc].reg_idx + 1;
+ fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
+ fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
/*
* In 82599, the number of Tx queues for each traffic
* class for both 8-TC and 4-TC modes are:
@@ -3565,8 +3749,8 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
fcoe_tx_i = f->mask;
}
for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
- adapter->rx_ring[f->mask + i].reg_idx = fcoe_rx_i;
- adapter->tx_ring[f->mask + i].reg_idx = fcoe_tx_i;
+ adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
+ adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
}
ret = true;
}
@@ -3575,6 +3759,24 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
/**
+ * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
+ * @adapter: board private structure to initialize
+ *
+ * SR-IOV doesn't use any descriptor rings but changes the default if
+ * no other mapping is used.
+ *
+ */
+static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
+{
+ adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
+ adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
+ if (adapter->num_vfs)
+ return true;
+ else
+ return false;
+}
+
+/**
* ixgbe_cache_ring_register - Descriptor ring to register mapping
* @adapter: board private structure to initialize
*
@@ -3588,8 +3790,11 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
/* start with default case */
- adapter->rx_ring[0].reg_idx = 0;
- adapter->tx_ring[0].reg_idx = 0;
+ adapter->rx_ring[0]->reg_idx = 0;
+ adapter->tx_ring[0]->reg_idx = 0;
+
+ if (ixgbe_cache_ring_sriov(adapter))
+ return;
#ifdef IXGBE_FCOE
if (ixgbe_cache_ring_fcoe(adapter))
@@ -3619,33 +3824,63 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
int i;
-
- adapter->tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
- if (!adapter->tx_ring)
- goto err_tx_ring_allocation;
-
- adapter->rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
- if (!adapter->rx_ring)
- goto err_rx_ring_allocation;
+ int orig_node = adapter->node;
for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i].count = adapter->tx_ring_count;
- adapter->tx_ring[i].queue_index = i;
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
+ if (orig_node == -1) {
+ int cur_node = next_online_node(adapter->node);
+ if (cur_node == MAX_NUMNODES)
+ cur_node = first_online_node;
+ adapter->node = cur_node;
+ }
+ ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
+ adapter->node);
+ if (!ring)
+ ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+ if (!ring)
+ goto err_tx_ring_allocation;
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = i;
+ ring->numa_node = adapter->node;
+
+ adapter->tx_ring[i] = ring;
}
+ /* Restore the adapter's original node */
+ adapter->node = orig_node;
+
for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i].count = adapter->rx_ring_count;
- adapter->rx_ring[i].queue_index = i;
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+ if (orig_node == -1) {
+ int cur_node = next_online_node(adapter->node);
+ if (cur_node == MAX_NUMNODES)
+ cur_node = first_online_node;
+ adapter->node = cur_node;
+ }
+ ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
+ adapter->node);
+ if (!ring)
+ ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+ if (!ring)
+ goto err_rx_ring_allocation;
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = i;
+ ring->numa_node = adapter->node;
+
+ adapter->rx_ring[i] = ring;
}
+ /* Restore the adapter's original node */
+ adapter->node = orig_node;
+
ixgbe_cache_ring_register(adapter);
return 0;
err_rx_ring_allocation:
- kfree(adapter->tx_ring);
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ kfree(adapter->tx_ring[i]);
err_tx_ring_allocation:
return -ENOMEM;
}
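
The ring allocation above prefers memory on the adapter's NUMA node and falls back to an unconstrained allocation rather than failing when that node is short of memory; the same idiom is used further down for the buffer-info arrays via vmalloc_node(). The fallback pattern in isolation (kernel context assumed):

#include <linux/slab.h>

/* Allocate @size zeroed bytes preferably on @node, falling back to any
 * node instead of failing outright when the preferred node has no memory. */
static void *alloc_near_node(size_t size, int node)
{
	void *p = kzalloc_node(size, GFP_KERNEL, node);

	if (!p)
		p = kzalloc(size, GFP_KERNEL);
	return p;
}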
@@ -3700,6 +3935,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
adapter->atr_sample_rate = 0;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
+
ixgbe_set_num_queues(adapter);
err = pci_enable_msi(adapter->pdev);
@@ -3741,7 +3979,11 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
}
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
- q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
+ q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
+ GFP_KERNEL, adapter->node);
+ if (!q_vector)
+ q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
+ GFP_KERNEL);
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
@@ -3868,10 +4110,16 @@ err_set_interrupt:
**/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
- adapter->tx_ring = NULL;
- adapter->rx_ring = NULL;
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ kfree(adapter->tx_ring[i]);
+ adapter->tx_ring[i] = NULL;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ kfree(adapter->rx_ring[i]);
+ adapter->rx_ring[i] = NULL;
+ }
ixgbe_free_q_vectors(adapter);
ixgbe_reset_interrupt_capability(adapter);
@@ -3942,6 +4190,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
+ struct net_device *dev = adapter->netdev;
unsigned int rss;
#ifdef CONFIG_IXGBE_DCB
int j;
@@ -3969,10 +4218,18 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ if (dev->features & NETIF_F_NTUPLE) {
+ /* Flow Director perfect filter enabled */
+ adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ adapter->atr_sample_rate = 0;
+ spin_lock_init(&adapter->fdir_perfect_lock);
+ } else {
+ /* Flow Director hash filters enabled */
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->atr_sample_rate = 20;
+ }
adapter->ring_feature[RING_F_FDIR].indices =
IXGBE_MAX_FDIR_INDICES;
- adapter->atr_sample_rate = 20;
adapter->fdir_pballoc = 0;
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
@@ -4041,6 +4298,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
/* enable rx csum by default */
adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+ /* get assigned NUMA node */
+ adapter->node = dev_to_node(&pdev->dev);
+
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
@@ -4060,7 +4320,9 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
int size;
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vmalloc(size);
+ tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
+ if (!tx_ring->tx_buffer_info)
+ tx_ring->tx_buffer_info = vmalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
memset(tx_ring->tx_buffer_info, 0, size);
@@ -4102,7 +4364,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
- err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
if (!err)
continue;
DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
@@ -4126,7 +4388,9 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vmalloc(size);
+ rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
+ if (!rx_ring->rx_buffer_info)
+ rx_ring->rx_buffer_info = vmalloc(size);
if (!rx_ring->rx_buffer_info) {
DPRINTK(PROBE, ERR,
"vmalloc allocation failed for the rx desc ring\n");
@@ -4172,7 +4436,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
if (!err)
continue;
DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
@@ -4215,8 +4479,8 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- if (adapter->tx_ring[i].desc)
- ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
+ if (adapter->tx_ring[i]->desc)
+ ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
}
/**
@@ -4252,8 +4516,8 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- if (adapter->rx_ring[i].desc)
- ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
+ if (adapter->rx_ring[i]->desc)
+ ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
}
/**
@@ -4530,8 +4794,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
adapter->hw_rx_no_dma_resources +=
IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
for (i = 0; i < adapter->num_rx_queues; i++) {
- rsc_count += adapter->rx_ring[i].rsc_count;
- rsc_flush += adapter->rx_ring[i].rsc_flush;
+ rsc_count += adapter->rx_ring[i]->rsc_count;
+ rsc_flush += adapter->rx_ring[i]->rsc_flush;
}
adapter->rsc_total_count = rsc_count;
adapter->rsc_total_flush = rsc_flush;
@@ -4539,11 +4803,11 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
/* gather some stats to the adapter struct that are per queue */
for (i = 0; i < adapter->num_tx_queues; i++)
- restart_queue += adapter->tx_ring[i].restart_queue;
+ restart_queue += adapter->tx_ring[i]->restart_queue;
adapter->restart_queue = restart_queue;
for (i = 0; i < adapter->num_rx_queues; i++)
- non_eop_descs += adapter->rx_ring[i].non_eop_descs;
+ non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
adapter->non_eop_descs = non_eop_descs;
adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
@@ -4782,7 +5046,7 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
for (i = 0; i < adapter->num_tx_queues; i++)
set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i].reinit_state));
+ &(adapter->tx_ring[i]->reinit_state));
} else {
DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
"ignored adding FDIR ATR filters \n");
@@ -4791,6 +5055,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
netif_tx_start_all_queues(adapter->netdev);
}
+static DEFINE_MUTEX(ixgbe_watchdog_lock);
+
/**
* ixgbe_watchdog_task - worker thread to bring link up
* @work: pointer to work_struct containing our data
@@ -4802,13 +5068,16 @@ static void ixgbe_watchdog_task(struct work_struct *work)
watchdog_task);
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 link_speed = adapter->link_speed;
- bool link_up = adapter->link_up;
+ u32 link_speed;
+ bool link_up;
int i;
struct ixgbe_ring *tx_ring;
int some_tx_pending = 0;
- adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
+ mutex_lock(&ixgbe_watchdog_lock);
+
+ link_up = adapter->link_up;
+ link_speed = adapter->link_speed;
if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
@@ -4879,7 +5148,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
if (!netif_carrier_ok(netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_ring = &adapter->tx_ring[i];
+ tx_ring = adapter->tx_ring[i];
if (tx_ring->next_to_use != tx_ring->next_to_clean) {
some_tx_pending = 1;
break;
@@ -4897,7 +5166,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
}
ixgbe_update_stats(adapter);
- adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
+ mutex_unlock(&ixgbe_watchdog_lock);
}
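The change above serializes the watchdog with a mutex instead of the old IN_WATCHDOG_TASK flag, so a second scheduling of the work item blocks until the first run finishes rather than racing on shared link state. A hedged sketch of that pattern with illustrative names (not ixgbe symbols):

#include <linux/mutex.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(example_watchdog_lock);

static void example_watchdog_task(struct work_struct *work)
{
	mutex_lock(&example_watchdog_lock);
	/* sample link state, update stats, adjust carrier ... */
	mutex_unlock(&example_watchdog_lock);
}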
static int ixgbe_tso(struct ixgbe_adapter *adapter,
@@ -5381,7 +5650,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
}
}
- tx_ring = &adapter->tx_ring[skb->queue_mapping];
+ tx_ring = adapter->tx_ring[skb->queue_mapping];
if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
(skb->protocol == htons(ETH_P_FCOE))) {
@@ -5487,7 +5756,8 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+ IXGBE_RAH_AV);
return 0;
}
@@ -5624,6 +5894,61 @@ static const struct net_device_ops ixgbe_netdev_ops = {
#endif /* IXGBE_FCOE */
};
+static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
+ const struct ixgbe_info *ii)
+{
+#ifdef CONFIG_PCI_IOV
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
+ return;
+
+ /* The 82599 supports up to 64 VFs per physical function
+ * but this implementation limits allocation to 63 so that
+ * basic networking resources are still available to the
+ * physical function
+ */
+ adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
+ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
+ err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ if (err) {
+ DPRINTK(PROBE, ERR,
+ "Failed to enable PCI sriov: %d\n", err);
+ goto err_novfs;
+ }
+ /* If call to enable VFs succeeded then allocate memory
+ * for per VF control structures.
+ */
+ adapter->vfinfo =
+ kcalloc(adapter->num_vfs,
+ sizeof(struct vf_data_storage), GFP_KERNEL);
+ if (adapter->vfinfo) {
+ /* Now that we're sure SR-IOV is enabled
+	 * and memory is allocated, set up the mailbox parameters
+ */
+ ixgbe_init_mbx_params_pf(hw);
+ memcpy(&hw->mbx.ops, ii->mbx_ops,
+ sizeof(hw->mbx.ops));
+
+ /* Disable RSC when in SR-IOV mode */
+ adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
+ IXGBE_FLAG2_RSC_ENABLED);
+ return;
+ }
+
+ /* Oh oh */
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate memory for VF "
+ "Data Storage - SRIOV disabled\n");
+ pci_disable_sriov(adapter->pdev);
+
+err_novfs:
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+ adapter->num_vfs = 0;
+#endif /* CONFIG_PCI_IOV */
+}
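ixgbe_probe_vf() pairs pci_enable_sriov() with allocation of per-VF state and undoes the enable if the allocation fails. A condensed sketch of that pairing under hypothetical foo_* names (not ixgbe API):

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Hedged sketch: enable SR-IOV, allocate per-VF state, undo on failure. */
static int foo_enable_vfs(struct pci_dev *pdev, int num_vfs,
			  size_t per_vf_size, void **vf_state)
{
	int err = pci_enable_sriov(pdev, num_vfs);

	if (err)
		return err;			/* nothing to undo yet */

	*vf_state = kcalloc(num_vfs, per_vf_size, GFP_KERNEL);
	if (!*vf_state) {
		pci_disable_sriov(pdev);	/* undo the enable */
		return -ENOMEM;
	}
	return 0;
}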
+
/**
* ixgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
@@ -5802,6 +6127,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_sw_init;
}
+ ixgbe_probe_vf(adapter, ii);
+
netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_HW_VLAN_TX |
@@ -5822,6 +6149,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->vlan_features |= NETIF_F_IPV6_CSUM;
netdev->vlan_features |= NETIF_F_SG;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
+ IXGBE_FLAG_DCB_ENABLED);
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
@@ -5948,6 +6278,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
ixgbe_setup_dca(adapter);
}
#endif
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
+ adapter->num_vfs);
+ for (i = 0; i < adapter->num_vfs; i++)
+ ixgbe_vf_configuration(pdev, (i | 0x10000000));
+ }
+
/* add san mac addr to netdev */
ixgbe_add_sanmac_netdev(netdev);
@@ -5960,6 +6297,8 @@ err_register:
ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
err_eeprom:
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
del_timer_sync(&adapter->sfp_timer);
cancel_work_sync(&adapter->sfp_task);
@@ -6028,6 +6367,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
+
ixgbe_clear_interrupt_scheme(adapter);
ixgbe_release_hw_control(adapter);
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
new file mode 100644
index 000000000000..d75f9148eb1f
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -0,0 +1,479 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "ixgbe_type.h"
+#include "ixgbe_common.h"
+#include "ixgbe_mbx.h"
+
+/**
+ * ixgbe_read_mbx - Reads a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ if (mbx->ops.read)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = 0;
+
+ if (size > mbx->size)
+ ret_val = IXGBE_ERR_MBX;
+
+ else if (mbx->ops.write)
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg - checks to see if someone sent us mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbx->ops.check_for_msg)
+ ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack - checks to see if someone sent us ACK
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbx->ops.check_for_ack)
+ ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst - checks to see if other side has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbx->ops.check_for_rst)
+ ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ if (!countdown || !mbx->ops.check_for_msg)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ udelay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message acknowledgement
+ **/
+static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ if (!countdown || !mbx->ops.check_for_ack)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ udelay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!mbx->ops.read)
+ goto out;
+
+ ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+
+	/* if a message was received, read it; otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ /* exit if either we can't write or there isn't a defined timeout */
+ if (!mbx->ops.write || !mbx->timeout)
+ goto out;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = ixgbe_poll_for_ack(hw, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_mbx_ops_generic - Initialize MB function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Setup the mailbox read and write message function pointers
+ **/
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ mbx->ops.read_posted = ixgbe_read_posted_mbx;
+ mbx->ops.write_posted = ixgbe_write_posted_mbx;
+}
+
+static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
+{
+ u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbvficr & mask) {
+ ret_val = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
+ index)) {
+ ret_val = 0;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
+ index)) {
+ ret_val = 0;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst_pf - checks to see if the VF has reset
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ u32 reg_offset = (vf_number < 32) ? 0 : 1;
+ u32 vf_shift = vf_number % 32;
+ u32 vflre = 0;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+
+ if (vflre & (1 << vf_shift)) {
+ ret_val = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ u32 p2v_mailbox;
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
+
+ /* reserve mailbox for vf use */
+ p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
+ if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
+ ret_val = 0;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx_pf - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbe_check_for_msg_pf(hw, vf_number);
+ ixgbe_check_for_ack_pf(hw, vf_number);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
+
+	/* Interrupt VF to tell it a message has been sent and release buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+ return ret_val;
+
+}
+
+/**
+ * ixgbe_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/
+static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
+
+ /* Acknowledge the message and release buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return;
+
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+}
+
+struct ixgbe_mbx_operations mbx_ops_82599 = {
+ .read = ixgbe_read_mbx_pf,
+ .write = ixgbe_write_mbx_pf,
+ .read_posted = ixgbe_read_posted_mbx,
+ .write_posted = ixgbe_write_posted_mbx,
+ .check_for_msg = ixgbe_check_for_msg_pf,
+ .check_for_ack = ixgbe_check_for_ack_pf,
+ .check_for_rst = ixgbe_check_for_rst_pf,
+};
+
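The mbx_ops_82599 table is the PF-side implementation behind the generic ixgbe_read_mbx()/ixgbe_write_mbx() wrappers. A hedged usage sketch (illustrative only; in the driver the wiring is done in ixgbe_probe_vf() through ii->mbx_ops):

/* Hedged sketch: wire up the 82599 PF mailbox ops and ping VF 0. */
static void example_ping_vf0(struct ixgbe_hw *hw)
{
	u32 ping = IXGBE_PF_CONTROL_MSG | IXGBE_VT_MSGTYPE_CTS;

	memcpy(&hw->mbx.ops, &mbx_ops_82599, sizeof(hw->mbx.ops));
	ixgbe_init_mbx_params_pf(hw);	/* size, timeouts, stats */

	if (ixgbe_write_mbx(hw, &ping, 1, 0))	/* vf_number 0, no ack wait */
		printk(KERN_WARNING "mailbox busy, VF 0 not pinged\n");
}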
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
new file mode 100644
index 000000000000..be7ab3309ab7
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -0,0 +1,96 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "ixgbe_type.h"
+
+#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX -100
+
+#define IXGBE_VFMAILBOX 0x002FC
+#define IXGBE_VFMBMEM 0x00200
+
+#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
+#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
+
+#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is IXGBE_PF_*.
+ * Message ACKs are the value or'd with 0xF0000000
+ */
+#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET 0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD 3
+
+#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+
+extern struct ixgbe_mbx_operations mbx_ops_82599;
+
+#endif /* _IXGBE_MBX_H_ */
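Word 0 of a mailbox message packs the message type in the low 16 bits, per-message info in bits 23:16, and the ACK/NACK/CTS handshake bits on top. A hedged worked example of composing and decoding that word with the masks above (expected values in comments):

/* Hedged worked example: decode word 0 of a VF->PF message. */
static u16 example_decode_word0(void)
{
	u32 w0 = IXGBE_VF_SET_MULTICAST | (3 << IXGBE_VT_MSGINFO_SHIFT);
	u16 msg_type = w0 & 0xFFFF;	/* == IXGBE_VF_SET_MULTICAST (0x03) */
	u16 entries = (w0 & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;

	return msg_type == IXGBE_VF_SET_MULTICAST ? entries : 0;	/* 3 */
}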
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
new file mode 100644
index 000000000000..d4cd20f30199
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -0,0 +1,362 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#ifdef NETIF_F_HW_VLAN_TX
+#include <linux/if_vlan.h>
+#endif
+
+#include "ixgbe.h"
+
+#include "ixgbe_sriov.h"
+
+int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
+ int entries, u16 *hash_list, u32 vf)
+{
+ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+ int i;
+
+ /* only so many hash values supported */
+ entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
+
+ /*
+	 * save the number of multicast addresses assigned to this
+	 * VF for later use, so it can be restored when the PF
+	 * multicast list changes
+ */
+ vfinfo->num_vf_mc_hashes = entries;
+
+ /*
+ * VFs are limited to using the MTA hash table for their multicast
+ * addresses
+ */
+ for (i = 0; i < entries; i++) {
+		vfinfo->vf_mc_hashes[i] = hash_list[i];
+ }
+
+ /* Flush and reset the mta with the new values */
+ ixgbe_set_rx_mode(adapter->netdev);
+
+ return 0;
+}
+
+void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct vf_data_storage *vfinfo;
+ int i, j;
+ u32 vector_bit;
+ u32 vector_reg;
+ u32 mta_reg;
+
+ for (i = 0; i < adapter->num_vfs; i++) {
+ vfinfo = &adapter->vfinfo[i];
+ for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
+ hw->addr_ctrl.mta_in_use++;
+ vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
+ vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
+ mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
+ mta_reg |= (1 << vector_bit);
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+ }
+ }
+}
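ixgbe_restore_vf_multicasts() turns each 12-bit hash into an index into the 128 x 32-bit multicast table array (MTA). A hedged worked example of that indexing, with one concrete value shown in the comment:

/* Hedged worked example of the MTA indexing used above. */
static void example_mta_index(u16 hash, u32 *vector_reg, u32 *vector_bit)
{
	*vector_reg = (hash >> 5) & 0x7F;	/* which IXGBE_MTA(reg) */
	*vector_bit = hash & 0x1F;		/* which bit in that reg */
	/* e.g. hash 0x0A53 -> register 82, bit 19 */
}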
+
+int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
+{
+ u32 ctrl;
+
+ /* Check if global VLAN already set, if not set it */
+ ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
+ if (!(ctrl & IXGBE_VLNCTRL_VFE)) {
+ /* enable VLAN tag insert/strip */
+ ctrl |= IXGBE_VLNCTRL_VFE;
+ ctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+ }
+
+ return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
+}
+
+
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf)
+{
+ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+ vmolr |= (IXGBE_VMOLR_AUPE |
+ IXGBE_VMOLR_ROMPE |
+ IXGBE_VMOLR_ROPE |
+ IXGBE_VMOLR_BAM);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+}
+
+inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* reset offloads to defaults */
+ ixgbe_set_vmolr(hw, vf);
+
+
+ /* reset multicast table array for vf */
+ adapter->vfinfo[vf].num_vf_mc_hashes = 0;
+
+ /* Flush and reset the mta with the new values */
+ ixgbe_set_rx_mode(adapter->netdev);
+
+ if (adapter->vfinfo[vf].rar > 0) {
+ adapter->hw.mac.ops.clear_rar(&adapter->hw,
+ adapter->vfinfo[vf].rar);
+ adapter->vfinfo[vf].rar = -1;
+ }
+}
+
+int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
+ int vf, unsigned char *mac_addr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ adapter->vfinfo[vf].rar = hw->mac.ops.set_rar(hw, vf + 1, mac_addr,
+ vf, IXGBE_RAH_AV);
+ if (adapter->vfinfo[vf].rar < 0) {
+ DPRINTK(DRV, ERR, "Could not set MAC Filter for VF %d\n", vf);
+ return -1;
+ }
+
+ memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+
+ return 0;
+}
+
+int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
+{
+ unsigned char vf_mac_addr[6];
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ unsigned int vfn = (event_mask & 0x3f);
+
+ bool enable = ((event_mask & 0x10000000U) != 0);
+
+ if (enable) {
+ random_ether_addr(vf_mac_addr);
+ DPRINTK(PROBE, INFO, "IOV: VF %d is enabled "
+ "mac %02X:%02X:%02X:%02X:%02X:%02X\n",
+ vfn,
+ vf_mac_addr[0], vf_mac_addr[1], vf_mac_addr[2],
+ vf_mac_addr[3], vf_mac_addr[4], vf_mac_addr[5]);
+ /*
+ * Store away the VF "permananet" MAC address, it will ask
+ * for it later.
+ */
+ memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
+ }
+
+ return 0;
+}
+
+inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg;
+ u32 reg_offset, vf_shift;
+
+ vf_shift = vf % 32;
+ reg_offset = vf / 32;
+
+ /* enable transmit and receive for vf */
+ reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
+ reg |= (reg | (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+ reg |= (reg | (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
+
+ ixgbe_vf_reset_event(adapter, vf);
+}
+
+static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
+{
+ u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
+ u32 msgbuf[mbx_size];
+ struct ixgbe_hw *hw = &adapter->hw;
+ s32 retval;
+ int entries;
+ u16 *hash_list;
+ int add, vid;
+
+ retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
+
+ if (retval)
+ printk(KERN_ERR "Error receiving message from VF\n");
+
+ /* this is a message we already processed, do nothing */
+ if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
+ return retval;
+
+ /*
+ * until the vf completes a virtual function reset it should not be
+ * allowed to start any configuration.
+ */
+
+ if (msgbuf[0] == IXGBE_VF_RESET) {
+ unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
+ u8 *addr = (u8 *)(&msgbuf[1]);
+ DPRINTK(PROBE, INFO, "VF Reset msg received from vf %d\n", vf);
+ adapter->vfinfo[vf].clear_to_send = false;
+ ixgbe_vf_reset_msg(adapter, vf);
+ adapter->vfinfo[vf].clear_to_send = true;
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
+ memcpy(addr, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ /*
+ * Piggyback the multicast filter type so VF can compute the
+ * correct vectors
+ */
+ msgbuf[3] = hw->mac.mc_filter_type;
+ ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
+
+ return retval;
+ }
+
+ if (!adapter->vfinfo[vf].clear_to_send) {
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+ ixgbe_write_mbx(hw, msgbuf, 1, vf);
+ return retval;
+ }
+
+ switch ((msgbuf[0] & 0xFFFF)) {
+ case IXGBE_VF_SET_MAC_ADDR:
+ {
+ u8 *new_mac = ((u8 *)(&msgbuf[1]));
+ if (is_valid_ether_addr(new_mac))
+ ixgbe_set_vf_mac(adapter, vf, new_mac);
+ else
+ retval = -1;
+ }
+ break;
+ case IXGBE_VF_SET_MULTICAST:
+ entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+ >> IXGBE_VT_MSGINFO_SHIFT;
+ hash_list = (u16 *)&msgbuf[1];
+ retval = ixgbe_set_vf_multicasts(adapter, entries,
+ hash_list, vf);
+ break;
+ case IXGBE_VF_SET_LPE:
+ WARN_ON((msgbuf[0] & 0xFFFF) == IXGBE_VF_SET_LPE);
+ break;
+ case IXGBE_VF_SET_VLAN:
+ add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+ >> IXGBE_VT_MSGINFO_SHIFT;
+ vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
+ retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
+ break;
+ default:
+ DPRINTK(DRV, ERR, "Unhandled Msg %8.8x\n", msgbuf[0]);
+ retval = IXGBE_ERR_MBX;
+ break;
+ }
+
+ /* notify the VF of the results of what it sent us */
+ if (retval)
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
+
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
+
+ ixgbe_write_mbx(hw, msgbuf, 1, vf);
+
+ return retval;
+}
+
+static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 msg = IXGBE_VT_MSGTYPE_NACK;
+
+ /* if device isn't clear to send it shouldn't be reading either */
+ if (!adapter->vfinfo[vf].clear_to_send)
+ ixgbe_write_mbx(hw, &msg, 1, vf);
+}
+
+void ixgbe_msg_task(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vf;
+
+ for (vf = 0; vf < adapter->num_vfs; vf++) {
+ /* process any reset requests */
+ if (!ixgbe_check_for_rst(hw, vf))
+ ixgbe_vf_reset_event(adapter, vf);
+
+ /* process any messages pending */
+ if (!ixgbe_check_for_msg(hw, vf))
+ ixgbe_rcv_msg_from_vf(adapter, vf);
+
+ /* process any acks */
+ if (!ixgbe_check_for_ack(hw, vf))
+ ixgbe_rcv_ack_from_vf(adapter, vf);
+ }
+}
+
+void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* disable transmit and receive for all vfs */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
+}
+
+void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 ping;
+ int i;
+
+ for (i = 0 ; i < adapter->num_vfs; i++) {
+ ping = IXGBE_PF_CONTROL_MSG;
+ if (adapter->vfinfo[i].clear_to_send)
+ ping |= IXGBE_VT_MSGTYPE_CTS;
+ ixgbe_write_mbx(hw, &ping, 1, i);
+ }
+}
+
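ixgbe_vf_configuration() expects the low 6 bits of event_mask to select the VF and bit 28 to mean "enable", matching the (i | 0x10000000) call in ixgbe_probe(). A hedged sketch of the encoding (illustrative helper name only):

/* Hedged sketch of the event_mask encoding. */
static void example_enable_vf(struct pci_dev *pdev, unsigned int vf)
{
	/* low 6 bits select the VF, bit 28 means "enable" */
	ixgbe_vf_configuration(pdev, vf | 0x10000000U);
}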
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
new file mode 100644
index 000000000000..51d1106c45a1
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -0,0 +1,47 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_SRIOV_H_
+#define _IXGBE_SRIOV_H_
+
+int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
+ int entries, u16 *hash_list, u32 vf);
+void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
+int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf);
+void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
+void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
+void ixgbe_msg_task(struct ixgbe_adapter *adapter);
+int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
+ int vf, unsigned char *mac_addr);
+int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
+void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
+void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
+void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
+
+#endif /* _IXGBE_SRIOV_H_ */
+
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 9eafddfa1b97..2be907466593 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -30,7 +30,7 @@
#include <linux/types.h>
#include <linux/mdio.h>
-#include <linux/list.h>
+#include <linux/netdevice.h>
/* Vendor ID */
#define IXGBE_INTEL_VENDOR_ID 0x8086
@@ -277,6 +277,7 @@
#define IXGBE_DTXCTL 0x07E00
#define IXGBE_DMATXCTL 0x04A80
+#define IXGBE_PFDTXGSWC 0x08220
#define IXGBE_DTXMXSZRQ 0x08100
#define IXGBE_DTXTCPFLGL 0x04A88
#define IXGBE_DTXTCPFLGH 0x04A8C
@@ -287,6 +288,8 @@
#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
+
+#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
/* Tx DCA Control register : 128 of these (0-127) */
#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
@@ -497,6 +500,7 @@
/* DCB registers */
#define IXGBE_RTRPCS 0x02430
#define IXGBE_RTTDCS 0x04900
+#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
#define IXGBE_RTTPCS 0x0CD00
#define IXGBE_RTRUP2TC 0x03020
#define IXGBE_RTTUP2TC 0x0C800
@@ -730,6 +734,13 @@
#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
#define IXGBE_GCR_CAP_VER2 0x00040000
+#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
+#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
+#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
+#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
+#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
+ IXGBE_GCR_EXT_VT_MODE_64)
+
/* Time Sync Registers */
#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
@@ -1065,6 +1076,8 @@
/* VFRE bitmask */
#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
+#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
/* RDHMPN and TDHMPN bitmasks */
#define IXGBE_RDHMPN_RDICADDR 0x007FF800
#define IXGBE_RDHMPN_RDICRDREQ 0x00800000
@@ -1295,6 +1308,7 @@
/* VLAN pool filtering masks */
#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */
#define IXGBE_VLVF_ENTRIES 64
+#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
@@ -1843,6 +1857,12 @@
#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
+/* SR-IOV specific macros */
+#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
+#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4))
+#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
+
/* Little Endian defines */
#ifndef __le32
#define __le32 u32
@@ -2109,6 +2129,15 @@ struct ixgbe_atr_input {
u8 byte_stream[42];
};
+struct ixgbe_atr_input_masks {
+ u32 src_ip_mask;
+ u32 dst_ip_mask;
+ u16 src_port_mask;
+ u16 dst_port_mask;
+ u16 vlan_id_mask;
+ u16 data_mask;
+};
+
enum ixgbe_eeprom_type {
ixgbe_eeprom_uninitialized = 0,
ixgbe_eeprom_spi,
@@ -2385,7 +2414,7 @@ struct ixgbe_mac_operations {
s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
- s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct list_head *);
+ s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
ixgbe_mc_addr_itr);
s32 (*enable_mc)(struct ixgbe_hw *);
@@ -2463,6 +2492,37 @@ struct ixgbe_phy_info {
bool multispeed_fiber;
};
+#include "ixgbe_mbx.h"
+
+struct ixgbe_mbx_operations {
+ s32 (*init_params)(struct ixgbe_hw *hw);
+ s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*check_for_msg)(struct ixgbe_hw *, u16);
+ s32 (*check_for_ack)(struct ixgbe_hw *, u16);
+ s32 (*check_for_rst)(struct ixgbe_hw *, u16);
+};
+
+struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+ struct ixgbe_mbx_operations ops;
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u32 v2p_mailbox;
+ u16 size;
+};
+
struct ixgbe_hw {
u8 __iomem *hw_addr;
void *back;
@@ -2472,6 +2532,7 @@ struct ixgbe_hw {
struct ixgbe_phy_info phy;
struct ixgbe_eeprom_info eeprom;
struct ixgbe_bus_info bus;
+ struct ixgbe_mbx_info mbx;
u16 device_id;
u16 vendor_id;
u16 subsystem_device_id;
@@ -2486,6 +2547,7 @@ struct ixgbe_info {
struct ixgbe_mac_operations *mac_ops;
struct ixgbe_eeprom_operations *eeprom_ops;
struct ixgbe_phy_operations *phy_ops;
+ struct ixgbe_mbx_operations *mbx_ops;
};
diff --git a/drivers/net/ixgbevf/Makefile b/drivers/net/ixgbevf/Makefile
new file mode 100644
index 000000000000..dd4e0d27e8cc
--- /dev/null
+++ b/drivers/net/ixgbevf/Makefile
@@ -0,0 +1,38 @@
+################################################################################
+#
+# Intel 82599 Virtual Function driver
+# Copyright(c) 1999 - 2009 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) 82599 VF ethernet driver
+#
+
+obj-$(CONFIG_IXGBEVF) += ixgbevf.o
+
+ixgbevf-objs := vf.o \
+ mbx.o \
+ ethtool.o \
+ ixgbevf_main.o
+
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
new file mode 100644
index 000000000000..c44fdb05447a
--- /dev/null
+++ b/drivers/net/ixgbevf/defines.h
@@ -0,0 +1,292 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_DEFINES_H_
+#define _IXGBEVF_DEFINES_H_
+
+/* Device IDs */
+#define IXGBE_DEV_ID_82599_VF 0x10ED
+
+#define IXGBE_VF_IRQ_CLEAR_MASK 7
+#define IXGBE_VF_MAX_TX_QUEUES 1
+#define IXGBE_VF_MAX_RX_QUEUES 1
+#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
+
+/* Link speed */
+typedef u32 ixgbe_link_speed;
+#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
+#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
+
+#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
+#define IXGBE_LINKS_UP 0x40000000
+#define IXGBE_LINKS_SPEED 0x20000000
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
+
+/* Interrupt Vector Allocation Registers */
+#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
+
+#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+/* Receive Config masks */
+#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
+#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
+
+/* DCA Control */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+
+/* PSRTYPE bit definitions */
+#define IXGBE_PSRTYPE_TCPHDR 0x00000010
+#define IXGBE_PSRTYPE_UDPHDR 0x00000020
+#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
+#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
+#define IXGBE_PSRTYPE_L2HDR 0x00001000
+
+/* SRRCTL bit definitions */
+#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
+#define IXGBE_SRRCTL_RDMTS_SHIFT 22
+#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
+#define IXGBE_SRRCTL_DROP_EN 0x10000000
+#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
+#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
+
+/* Receive Descriptor bit definitions */
+#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
+#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
+#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
+#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
+#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
+#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
+#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
+#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
+#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
+#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
+#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
+#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
+#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
+#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
+#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
+#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
+#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
+#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */
+#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */
+#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
+#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
+#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
+#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
+#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
+#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
+#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define IXGBE_RXD_PRI_SHIFT 13
+#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
+#define IXGBE_RXD_CFI_SHIFT 12
+
+#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */
+#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */
+#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
+#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
+#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */
+#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
+#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
+#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
+#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
+
+#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
+#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
+#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
+#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
+#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
+#define IXGBE_RXDADV_RSCCNT_SHIFT 17
+#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
+#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
+#define IXGBE_RXDADV_SPH 0x8000
+
+#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXD_ERR_CE | \
+ IXGBE_RXD_ERR_LE | \
+ IXGBE_RXD_ERR_PE | \
+ IXGBE_RXD_ERR_OSE | \
+ IXGBE_RXD_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXDADV_ERR_CE | \
+ IXGBE_RXDADV_ERR_LE | \
+ IXGBE_RXDADV_ERR_PE | \
+ IXGBE_RXDADV_ERR_OSE | \
+ IXGBE_RXDADV_ERR_USE)
+
+#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
+#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
+#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+
+/* Transmit Descriptor - Advanced */
+union ixgbe_adv_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+/* Receive Descriptor - Advanced */
+union ixgbe_adv_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info; /* RSS, Pkt type */
+ __le16 hdr_info; /* Splithdr, hdrlen */
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+/* Context descriptors */
+struct ixgbe_adv_tx_context_desc {
+ __le32 vlan_macip_lens;
+ __le32 seqnum_seed;
+ __le32 type_tucmd_mlhl;
+ __le32 mss_l4len_idx;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
+#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
+#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
+#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
+#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
+#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
+#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
+#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
+#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
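As an aside (annotation, not part of the patch), a minimal sketch of how these DTYP/DCMD masks and the PAYLEN shift are typically combined into an advanced transmit data descriptor; the helper name and the exact flag set here are illustrative assumptions, not the driver's actual transmit path:

    /* Hypothetical helper: fill one advanced data descriptor for a single
     * DMA buffer, asking for FCS insertion, end-of-packet and status write-back. */
    static inline void example_fill_adv_tx_desc(union ixgbe_adv_tx_desc *desc,
                                                dma_addr_t dma, u32 len, u32 paylen)
    {
            u32 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT |
                               IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_EOP |
                               IXGBE_ADVTXD_DCMD_RS | len;

            desc->read.buffer_addr  = cpu_to_le64(dma);
            desc->read.cmd_type_len = cpu_to_le32(cmd_type_len);
            /* total payload length lives in olinfo_status, shifted into place */
            desc->read.olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
    }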
+
+/* Interrupt register bitmasks */
+
+/* Extended Interrupt Cause Read */
+#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
+#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
+#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+
+/* Extended Interrupt Cause Set */
+#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Set */
+#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Clear */
+#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+#define IXGBE_EIMS_ENABLE_MASK ( \
+ IXGBE_EIMS_RTX_QUEUE | \
+ IXGBE_EIMS_MAILBOX | \
+ IXGBE_EIMS_OTHER)
+
+#define IXGBE_EITR_CNT_WDIS 0x80000000
+
+/* Error Codes */
+#define IXGBE_ERR_INVALID_MAC_ADDR -1
+#define IXGBE_ERR_RESET_FAILED -2
+
+#endif /* _IXGBEVF_DEFINES_H_ */
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
new file mode 100644
index 000000000000..399be0c34c36
--- /dev/null
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -0,0 +1,716 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for ixgbevf */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <linux/if_vlan.h>
+#include <linux/uaccess.h>
+
+#include "ixgbevf.h"
+
+#define IXGBE_ALL_RAR_ENTRIES 16
+
+#ifdef ETHTOOL_GSTATS
+struct ixgbe_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+ int base_stat_offset;
+};
+
+#define IXGBEVF_STAT(m, b) sizeof(((struct ixgbevf_adapter *)0)->m), \
+ offsetof(struct ixgbevf_adapter, m), \
+ offsetof(struct ixgbevf_adapter, b)
+static struct ixgbe_stats ixgbe_gstrings_stats[] = {
+ {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc)},
+ {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc)},
+ {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc)},
+ {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc)},
+ {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base)},
+ {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc)},
+ {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base)},
+ {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base)},
+ {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base)},
+ {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base)},
+};
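For readability (annotation, not part of the patch): each IXGBEVF_STAT(m, b) entry supplies three initializers, the member's size plus the byte offsets of the live counter and of its baseline inside struct ixgbevf_adapter, so ixgbevf_get_ethtool_stats() below can report the difference between the two. The first table entry, expanded by hand:

    /* {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc)} becomes: */
    { "rx_packets",
      sizeof(((struct ixgbevf_adapter *)0)->stats.vfgprc),
      offsetof(struct ixgbevf_adapter, stats.vfgprc),
      offsetof(struct ixgbevf_adapter, stats.base_vfgprc) },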
+
+#define IXGBE_QUEUE_STATS_LEN 0
+#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+
+#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#endif /* ETHTOOL_GSTATS */
+#ifdef ETHTOOL_TEST
+static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)",
+ "Link test (on/offline)"
+};
+#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#endif /* ETHTOOL_TEST */
+
+static int ixgbevf_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = 0;
+ bool link_up;
+
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_DUMMY1;
+ ecmd->port = -1;
+
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+
+ if (link_up) {
+ ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ SPEED_10000 : SPEED_1000;
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ecmd->speed = -1;
+ ecmd->duplex = -1;
+ }
+
+ return 0;
+}
+
+static u32 ixgbevf_get_rx_csum(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
+}
+
+static int ixgbevf_set_rx_csum(struct net_device *netdev, u32 data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ if (data)
+ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+ else
+ adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
+
+ if (netif_running(netdev)) {
+ if (!adapter->dev_closed)
+ ixgbevf_reinit_locked(adapter);
+ } else {
+ ixgbevf_reset(adapter);
+ }
+
+ return 0;
+}
+
+static int ixgbevf_set_tso(struct net_device *netdev, u32 data)
+{
+ if (data) {
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ } else {
+ netif_tx_stop_all_queues(netdev);
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ netif_tx_start_all_queues(netdev);
+ }
+ return 0;
+}
+
+static u32 ixgbevf_get_msglevel(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
+
+static char *ixgbevf_reg_names[] = {
+ "IXGBE_VFCTRL",
+ "IXGBE_VFSTATUS",
+ "IXGBE_VFLINKS",
+ "IXGBE_VFRXMEMWRAP",
+ "IXGBE_VFRTIMER",
+ "IXGBE_VTEICR",
+ "IXGBE_VTEICS",
+ "IXGBE_VTEIMS",
+ "IXGBE_VTEIMC",
+ "IXGBE_VTEIAC",
+ "IXGBE_VTEIAM",
+ "IXGBE_VTEITR",
+ "IXGBE_VTIVAR",
+ "IXGBE_VTIVAR_MISC",
+ "IXGBE_VFRDBAL0",
+ "IXGBE_VFRDBAL1",
+ "IXGBE_VFRDBAH0",
+ "IXGBE_VFRDBAH1",
+ "IXGBE_VFRDLEN0",
+ "IXGBE_VFRDLEN1",
+ "IXGBE_VFRDH0",
+ "IXGBE_VFRDH1",
+ "IXGBE_VFRDT0",
+ "IXGBE_VFRDT1",
+ "IXGBE_VFRXDCTL0",
+ "IXGBE_VFRXDCTL1",
+ "IXGBE_VFSRRCTL0",
+ "IXGBE_VFSRRCTL1",
+ "IXGBE_VFPSRTYPE",
+ "IXGBE_VFTDBAL0",
+ "IXGBE_VFTDBAL1",
+ "IXGBE_VFTDBAH0",
+ "IXGBE_VFTDBAH1",
+ "IXGBE_VFTDLEN0",
+ "IXGBE_VFTDLEN1",
+ "IXGBE_VFTDH0",
+ "IXGBE_VFTDH1",
+ "IXGBE_VFTDT0",
+ "IXGBE_VFTDT1",
+ "IXGBE_VFTXDCTL0",
+ "IXGBE_VFTXDCTL1",
+ "IXGBE_VFTDWBAL0",
+ "IXGBE_VFTDWBAL1",
+ "IXGBE_VFTDWBAH0",
+ "IXGBE_VFTDWBAH1"
+};
+
+static int ixgbevf_get_regs_len(struct net_device *netdev)
+{
+ return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
+}
+
+static void ixgbevf_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs,
+ void *p)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u32 regs_len = ixgbevf_get_regs_len(netdev);
+ u8 i;
+
+ memset(p, 0, regs_len);
+
+ regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+
+ /* General Registers */
+ regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
+ regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
+ regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
+ regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFRTIMER);
+
+ /* Interrupt */
+ /* don't read EICR because doing so can clear interrupt causes; instead
+ * read EICS, which shadows EICR without clearing it */
+ regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+ regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
+ regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
+ regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
+ regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
+ regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
+ regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+
+ /* Receive DMA */
+ for (i = 0; i < 2; i++)
+ regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
+
+ /* Receive */
+ regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);
+
+ /* Transmit */
+ for (i = 0; i < 2; i++)
+ regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
+
+ for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
+ hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
+}
+
+static void ixgbevf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, ixgbevf_driver_name, 32);
+ strlcpy(drvinfo->version, ixgbevf_driver_version, 32);
+
+ strlcpy(drvinfo->fw_version, "N/A", 4);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+}
+
+static void ixgbevf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring = adapter->tx_ring;
+ struct ixgbevf_ring *rx_ring = adapter->rx_ring;
+
+ ring->rx_max_pending = IXGBEVF_MAX_RXD;
+ ring->tx_max_pending = IXGBEVF_MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rx_ring->count;
+ ring->tx_pending = tx_ring->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int ixgbevf_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
+ int i, err;
+ u32 new_rx_count, new_tx_count;
+ bool need_tx_update = false;
+ bool need_rx_update = false;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
+ new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
+ new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ if ((new_tx_count == adapter->tx_ring->count) &&
+ (new_rx_count == adapter->rx_ring->count)) {
+ /* nothing to do */
+ return 0;
+ }
+
+ while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
+ msleep(1);
+
+ if (new_tx_count != adapter->tx_ring_count) {
+ tx_ring = kcalloc(adapter->num_tx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!tx_ring) {
+ err = -ENOMEM;
+ goto err_setup;
+ }
+ memcpy(tx_ring, adapter->tx_ring,
+ adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tx_ring[i].count = new_tx_count;
+ err = ixgbevf_setup_tx_resources(adapter,
+ &tx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_tx_resources(adapter,
+ &tx_ring[i]);
+ }
+ kfree(tx_ring);
+ goto err_setup;
+ }
+ tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
+ }
+ need_tx_update = true;
+ }
+
+ if (new_rx_count != adapter->rx_ring_count) {
+ rx_ring = kcalloc(adapter->num_rx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!rx_ring) {
+ err = -ENOMEM;
+ goto err_rx_setup;
+ }
+ memcpy(rx_ring, adapter->rx_ring,
+ adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rx_ring[i].count = new_rx_count;
+ err = ixgbevf_setup_rx_resources(adapter,
+ &rx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_rx_resources(adapter,
+ &rx_ring[i]);
+ }
+ kfree(rx_ring);
+ goto err_rx_setup;
+ }
+ rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
+ }
+ need_rx_update = true;
+ }
+
+err_rx_setup:
+ /* if rings need to be updated, here's the place to do it in one shot */
+ if (need_tx_update || need_rx_update) {
+ if (netif_running(netdev))
+ ixgbevf_down(adapter);
+ }
+
+ /* tx */
+ if (need_tx_update) {
+ kfree(adapter->tx_ring);
+ adapter->tx_ring = tx_ring;
+ tx_ring = NULL;
+ adapter->tx_ring_count = new_tx_count;
+ }
+
+ /* rx */
+ if (need_rx_update) {
+ kfree(adapter->rx_ring);
+ adapter->rx_ring = rx_ring;
+ rx_ring = NULL;
+ adapter->rx_ring_count = new_rx_count;
+ }
+
+ /* success! */
+ err = 0;
+ if (netif_running(netdev))
+ ixgbevf_up(adapter);
+
+err_setup:
+ clear_bit(__IXGBEVF_RESETTING, &adapter->state);
+ return err;
+}
+
+static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
+{
+ switch (stringset) {
+ case ETH_SS_TEST:
+ return IXGBE_TEST_LEN;
+ case ETH_SS_STATS:
+ return IXGBE_GLOBAL_STATS_LEN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ ixgbevf_update_stats(adapter);
+ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+ char *p = (char *)adapter +
+ ixgbe_gstrings_stats[i].stat_offset;
+ char *b = (char *)adapter +
+ ixgbe_gstrings_stats[i].base_stat_offset;
+ data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
+ ((ixgbe_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)b : *(u32 *)b);
+ }
+}
+
+static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ char *p = (char *)data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *ixgbe_gstrings_test,
+ IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool link_up;
+ u32 link_speed = 0;
+ *data = 0;
+
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
+ if (!link_up)
+ *data = 1;
+
+ return *data;
+}
+
+/* ethtool register test data */
+struct ixgbevf_reg_test {
+ u16 reg;
+ u8 array_len;
+ u8 test_type;
+ u32 mask;
+ u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x40 bytes apart, or in contiguous tables. We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define WRITE_NO_TEST 3
+#define TABLE32_TEST 4
+#define TABLE64_TEST_LO 5
+#define TABLE64_TEST_HI 6
+
+/* default VF register test */
+static struct ixgbevf_reg_test reg_test_vf[] = {
+ { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+ { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
+ { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
+ { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+ { 0, 0, 0, 0 }
+};
+
+#define REG_PATTERN_TEST(R, M, W) \
+{ \
+ u32 pat, val, before; \
+ const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
+ for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
+ before = readl(adapter->hw.hw_addr + R); \
+ writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
+ val = readl(adapter->hw.hw_addr + R); \
+ if (val != (_test[pat] & W & M)) { \
+ hw_dbg(&adapter->hw, \
+ "pattern test reg %04X failed: got " \
+ "0x%08X expected 0x%08X\n", \
+ R, val, (_test[pat] & W & M)); \
+ *data = R; \
+ writel(before, adapter->hw.hw_addr + R); \
+ return 1; \
+ } \
+ writel(before, adapter->hw.hw_addr + R); \
+ } \
+}
+
+#define REG_SET_AND_CHECK(R, M, W) \
+{ \
+ u32 val, before; \
+ before = readl(adapter->hw.hw_addr + R); \
+ writel((W & M), (adapter->hw.hw_addr + R)); \
+ val = readl(adapter->hw.hw_addr + R); \
+ if ((W & M) != (val & M)) { \
+ printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
+ "expected 0x%08X\n", R, (val & M), (W & M)); \
+ *data = R; \
+ writel(before, (adapter->hw.hw_addr + R)); \
+ return 1; \
+ } \
+ writel(before, (adapter->hw.hw_addr + R)); \
+}
+
+static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
+{
+ struct ixgbevf_reg_test *test;
+ u32 i;
+
+ test = reg_test_vf;
+
+ /*
+ * Perform the register test, looping through the test table
+ * until we either fail or reach the null entry.
+ */
+ while (test->reg) {
+ for (i = 0; i < test->array_len; i++) {
+ switch (test->test_type) {
+ case PATTERN_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case SET_READ_TEST:
+ REG_SET_AND_CHECK(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case WRITE_NO_TEST:
+ writel(test->write,
+ (adapter->hw.hw_addr + test->reg)
+ + (i * 0x40));
+ break;
+ case TABLE32_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 4),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_LO:
+ REG_PATTERN_TEST(test->reg + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_HI:
+ REG_PATTERN_TEST((test->reg + 4) + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ }
+ }
+ test++;
+ }
+
+ *data = 0;
+ return *data;
+}
+
+static void ixgbevf_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ bool if_running = netif_running(netdev);
+
+ set_bit(__IXGBEVF_TESTING, &adapter->state);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ hw_dbg(&adapter->hw, "offline testing starting\n");
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result */
+ if (ixgbevf_link_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ ixgbevf_reset(adapter);
+
+ hw_dbg(&adapter->hw, "register testing starting\n");
+ if (ixgbevf_reg_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbevf_reset(adapter);
+
+ clear_bit(__IXGBEVF_TESTING, &adapter->state);
+ if (if_running)
+ dev_open(netdev);
+ } else {
+ hw_dbg(&adapter->hw, "online testing starting\n");
+ /* Online tests */
+ if (ixgbevf_link_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* Online tests aren't run; pass by default */
+ data[0] = 0;
+
+ clear_bit(__IXGBEVF_TESTING, &adapter->state);
+ }
+ msleep_interruptible(4 * 1000);
+}
+
+static int ixgbevf_nway_reset(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev)) {
+ if (!adapter->dev_closed)
+ ixgbevf_reinit_locked(adapter);
+ }
+
+ return 0;
+}
+
+static struct ethtool_ops ixgbevf_ethtool_ops = {
+ .get_settings = ixgbevf_get_settings,
+ .get_drvinfo = ixgbevf_get_drvinfo,
+ .get_regs_len = ixgbevf_get_regs_len,
+ .get_regs = ixgbevf_get_regs,
+ .nway_reset = ixgbevf_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = ixgbevf_get_ringparam,
+ .set_ringparam = ixgbevf_set_ringparam,
+ .get_rx_csum = ixgbevf_get_rx_csum,
+ .set_rx_csum = ixgbevf_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_msglevel = ixgbevf_get_msglevel,
+ .set_msglevel = ixgbevf_set_msglevel,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ixgbevf_set_tso,
+ .self_test = ixgbevf_diag_test,
+ .get_sset_count = ixgbevf_get_sset_count,
+ .get_strings = ixgbevf_get_strings,
+ .get_ethtool_stats = ixgbevf_get_ethtool_stats,
+};
+
+void ixgbevf_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
+}
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
new file mode 100644
index 000000000000..f7015efbff05
--- /dev/null
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -0,0 +1,318 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_H_
+#define _IXGBEVF_H_
+
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+
+#include "vf.h"
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct ixgbevf_tx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ unsigned long time_stamp;
+ u16 length;
+ u16 next_to_watch;
+ u16 mapped_as_page;
+};
+
+struct ixgbevf_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ dma_addr_t page_dma;
+ unsigned int page_offset;
+};
+
+struct ixgbevf_ring {
+ struct ixgbevf_adapter *adapter; /* backlink */
+ void *desc; /* descriptor ring memory */
+ dma_addr_t dma; /* phys. address of descriptor ring */
+ unsigned int size; /* length in bytes */
+ unsigned int count; /* amount of descriptors */
+ unsigned int next_to_use;
+ unsigned int next_to_clean;
+
+ int queue_index; /* needed for multiqueue queue management */
+ union {
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ struct ixgbevf_rx_buffer *rx_buffer_info;
+ };
+
+ u16 head;
+ u16 tail;
+
+ unsigned int total_bytes;
+ unsigned int total_packets;
+
+ u16 reg_idx; /* holds the special value that gets the hardware register
+ * offset associated with this ring, which is different
+ * for DCB and RSS modes */
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ /* cpu for tx queue */
+ int cpu;
+#endif
+
+ u64 v_idx; /* maps directly to the index for this ring in the hardware
+ * vector array, can also be used for finding the bit in EICR
+ * and friends that represents the vector for this ring */
+
+ u16 work_limit; /* max work per interrupt */
+ u16 rx_buf_len;
+};
+
+enum ixgbevf_ring_f_enum {
+ RING_F_NONE = 0,
+ RING_F_ARRAY_SIZE /* must be last in enum set */
+};
+
+struct ixgbevf_ring_feature {
+ int indices;
+ int mask;
+};
+
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define MAX_RX_QUEUES 1
+#define MAX_TX_QUEUES 1
+
+#define IXGBEVF_DEFAULT_TXD 1024
+#define IXGBEVF_DEFAULT_RXD 512
+#define IXGBEVF_MAX_TXD 4096
+#define IXGBEVF_MIN_TXD 64
+#define IXGBEVF_MAX_RXD 4096
+#define IXGBEVF_MIN_RXD 64
+
+/* Supported Rx Buffer Sizes */
+#define IXGBEVF_RXBUFFER_64 64 /* Used for packet split */
+#define IXGBEVF_RXBUFFER_128 128 /* Used for packet split */
+#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
+#define IXGBEVF_RXBUFFER_2048 2048
+#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
+
+#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define IXGBE_TX_FLAGS_CSUM (u32)(1)
+#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
+#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
+#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
+#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4)
+#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5)
+#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
+#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct ixgbevf_q_vector {
+ struct ixgbevf_adapter *adapter;
+ struct napi_struct napi;
+ DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
+ DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
+ u8 rxr_count; /* Rx ring count assigned to this vector */
+ u8 txr_count; /* Tx ring count assigned to this vector */
+ u8 tx_itr;
+ u8 rx_itr;
+ u32 eitr;
+ int v_idx; /* vector index in list */
+};
+
+/* Helper macros to switch between ints/sec and what the register uses.
+ * And yes, it's the same math going both ways. The lowest value
+ * supported by all of the ixgbe hardware is 8.
+ */
+#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
+ ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
+#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
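A quick worked example of the conversion (annotation only; the values follow directly from the macro above):

    /* 8000 ints/s  -> 1000000000 / (8000  * 256) = 488 register units
     * 20000 ints/s -> 1000000000 / (20000 * 256) = 195 register units
     * Feeding 488 back through the same expression yields ~8004 ints/s,
     * which is why EITR_REG_TO_INTS_PER_SEC can alias the same macro. */
    u32 eitr_reg = EITR_INTS_PER_SEC_TO_REG(8000);  /* 488 */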
+
+#define IXGBE_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
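To illustrate the ring accounting (annotation only; the indices are made-up examples):

    /* With count = 512, next_to_use = 10, next_to_clean = 4:
     *   next_to_clean <= next_to_use, so add count:
     *   512 + 4 - 10 - 1 = 505 free descriptors
     *   (6 are owned by hardware and one slot is always left unused).
     * With next_to_use = 4, next_to_clean = 20 (clean index has wrapped ahead):
     *   20 - 4 - 1 = 15 free descriptors. */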
+
+#define IXGBE_RX_DESC_ADV(R, i) \
+ (&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
+#define IXGBE_TX_DESC_ADV(R, i) \
+ (&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
+#define IXGBE_TX_CTXTDESC_ADV(R, i) \
+ (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+
+#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
+
+#define OTHER_VECTOR 1
+#define NON_Q_VECTORS (OTHER_VECTOR)
+
+#define MAX_MSIX_Q_VECTORS 2
+#define MAX_MSIX_COUNT 2
+
+#define MIN_MSIX_Q_VECTORS 2
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+
+/* board specific private data structure */
+struct ixgbevf_adapter {
+ struct timer_list watchdog_timer;
+#ifdef NETIF_F_HW_VLAN_TX
+ struct vlan_group *vlgrp;
+#endif
+ u16 bd_number;
+ struct work_struct reset_task;
+ struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+ char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
+
+ /* Interrupt Throttle Rate */
+ u32 itr_setting;
+ u16 eitr_low;
+ u16 eitr_high;
+
+ /* TX */
+ struct ixgbevf_ring *tx_ring; /* One per active queue */
+ int num_tx_queues;
+ u64 restart_queue;
+ u64 hw_csum_tx_good;
+ u64 lsc_int;
+ u64 hw_tso_ctxt;
+ u64 hw_tso6_ctxt;
+ u32 tx_timeout_count;
+ bool detect_tx_hung;
+
+ /* RX */
+ struct ixgbevf_ring *rx_ring; /* One per active queue */
+ int num_rx_queues;
+ int num_rx_pools; /* == num_rx_queues in 82598 */
+ int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
+ u64 hw_csum_rx_error;
+ u64 hw_rx_no_dma_resources;
+ u64 hw_csum_rx_good;
+ u64 non_eop_descs;
+ int num_msix_vectors;
+ int max_msix_q_vectors; /* true count of q_vectors for device */
+ struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE];
+ struct msix_entry *msix_entries;
+
+ u64 rx_hdr_split;
+ u32 alloc_rx_page_failed;
+ u32 alloc_rx_buff_failed;
+
+ /* Some features need tri-state capability,
+ * thus the additional *_CAPABLE flags.
+ */
+ u32 flags;
+#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
+#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
+#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
+#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
+#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5)
+#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 6)
+#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
+#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 8)
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct net_device_stats net_stats;
+
+ /* structs defined in ixgbe_vf.h */
+ struct ixgbe_hw hw;
+ u16 msg_enable;
+ struct ixgbevf_hw_stats stats;
+ u64 zero_base;
+ /* Interrupt Throttle Rate */
+ u32 eitr_param;
+
+ unsigned long state;
+ u32 *config_space;
+ u64 tx_busy;
+ unsigned int tx_ring_count;
+ unsigned int rx_ring_count;
+
+ u32 link_speed;
+ bool link_up;
+ unsigned long link_check_timeout;
+
+ struct work_struct watchdog_task;
+ bool netdev_registered;
+ bool dev_closed;
+};
+
+enum ixbgevf_state_t {
+ __IXGBEVF_TESTING,
+ __IXGBEVF_RESETTING,
+ __IXGBEVF_DOWN
+};
+
+enum ixgbevf_boards {
+ board_82599_vf,
+};
+
+extern struct ixgbevf_info ixgbevf_vf_info;
+extern struct ixgbe_mac_operations ixgbevf_mbx_ops;
+
+/* needed by ethtool.c */
+extern char ixgbevf_driver_name[];
+extern const char ixgbevf_driver_version[];
+
+extern int ixgbevf_up(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
+extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
+
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *ifr);
+
+#endif
+extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
+extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
+
+#ifdef DEBUG
+extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
+#define hw_dbg(hw, format, arg...) \
+ printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
+#else
+#define hw_dbg(hw, format, arg...) do {} while (0)
+#endif
+
+#endif /* _IXGBEVF_H_ */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
new file mode 100644
index 000000000000..235b5fd4b8d4
--- /dev/null
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -0,0 +1,3578 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/******************************************************************************
+ Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
+******************************************************************************/
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+#include "ixgbevf.h"
+
+char ixgbevf_driver_name[] = "ixgbevf";
+static const char ixgbevf_driver_string[] =
+ "Intel(R) 82599 Virtual Function";
+
+#define DRV_VERSION "1.0.0-k0"
+const char ixgbevf_driver_version[] = DRV_VERSION;
+static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
+
+static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
+ [board_82599_vf] = &ixgbevf_vf_info,
+};
+
+/* ixgbevf_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static struct pci_device_id ixgbevf_pci_tbl[] = {
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
+ board_82599_vf},
+
+ /* required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+
+/* forward decls */
+static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
+static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
+ u32 itr_reg);
+
+static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
+ struct ixgbevf_ring *rx_ring,
+ u32 val)
+{
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
+}
+
+/**
+ * ixgbevf_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * @adapter: pointer to adapter struct
+ * @direction: 0 for Rx, 1 for Tx, -1 for other causes
+ * @queue: queue to map the corresponding interrupt to
+ * @msix_vector: the vector to map to the corresponding queue
+ *
+ */
+static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
+ u8 queue, u8 msix_vector)
+{
+ u32 ivar, index;
+ struct ixgbe_hw *hw = &adapter->hw;
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+ ivar &= ~0xFF;
+ ivar |= msix_vector;
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
+ } else {
+ /* tx or rx causes */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ index = ((16 * (queue & 1)) + (8 * direction));
+ ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
+ ivar &= ~(0xFF << index);
+ ivar |= (msix_vector << index);
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
+ }
+}
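A worked example of the IVAR indexing above (annotation only): each 32-bit VTIVAR register holds four one-byte entries, and index selects the byte via 16 * (queue & 1) + 8 * direction, while queue >> 1 selects the register.

    /* queue 0, Rx (direction 0): index = 0,  bits  7:0  of VTIVAR(0)
     * queue 0, Tx (direction 1): index = 8,  bits 15:8  of VTIVAR(0)
     * queue 1, Rx (direction 0): index = 16, bits 23:16 of VTIVAR(0)
     * queue 2, Rx (direction 0): index = 0,  bits  7:0  of VTIVAR(1) (queue >> 1 = 1) */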
+
+static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_tx_buffer
+ *tx_buffer_info)
+{
+ if (tx_buffer_info->dma) {
+ if (tx_buffer_info->mapped_as_page)
+ pci_unmap_page(adapter->pdev,
+ tx_buffer_info->dma,
+ tx_buffer_info->length,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_single(adapter->pdev,
+ tx_buffer_info->dma,
+ tx_buffer_info->length,
+ PCI_DMA_TODEVICE);
+ tx_buffer_info->dma = 0;
+ }
+ if (tx_buffer_info->skb) {
+ dev_kfree_skb_any(tx_buffer_info->skb);
+ tx_buffer_info->skb = NULL;
+ }
+ tx_buffer_info->time_stamp = 0;
+ /* tx_buffer_info must be completely set up in the transmit path */
+}
+
+static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ unsigned int eop)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 head, tail;
+
+ /* Detect a transmit hang in hardware; this serializes the
+ * check with the clearing of time_stamp and the movement of eop */
+ head = readl(hw->hw_addr + tx_ring->head);
+ tail = readl(hw->hw_addr + tx_ring->tail);
+ adapter->detect_tx_hung = false;
+ if ((head != tail) &&
+ tx_ring->tx_buffer_info[eop].time_stamp &&
+ time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
+ /* detected Tx unit hang */
+ union ixgbe_adv_tx_desc *tx_desc;
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ printk(KERN_ERR "Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+ " TDH, TDT <%x>, <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "tx_buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->queue_index,
+ head, tail,
+ tx_ring->next_to_use, eop,
+ tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+ return true;
+ }
+
+ return false;
+}
+
+#define IXGBE_MAX_TXD_PWR 14
+#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
+ (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
+#ifdef MAX_SKB_FRAGS
+#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
+ MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
+#else
+#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
+#endif
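Worked numbers for the descriptor budgeting above (annotation only; a 4 KB PAGE_SIZE is assumed for the fragment case):

    /* IXGBE_MAX_DATA_PER_TXD = 1 << 14 = 16384 bytes per descriptor.
     * TXD_USE_COUNT(S) is a ceiling division:
     *   TXD_USE_COUNT(9000)  = 0 + 1 = 1 descriptor
     *   TXD_USE_COUNT(16384) = 1 + 0 = 1 descriptor
     *   TXD_USE_COUNT(40000) = 2 + 1 = 3 descriptors
     * So DESC_NEEDED = 1 (worst-case linear data) +
     *                  MAX_SKB_FRAGS * TXD_USE_COUNT(4096) + 1 (context descriptor). */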
+
+static void ixgbevf_tx_timeout(struct net_device *netdev);
+
+/**
+ * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ * @tx_ring: tx ring to clean
+ **/
+static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ unsigned int i, eop, count = 0;
+ unsigned int total_bytes = 0, total_packets = 0;
+
+ i = tx_ring->next_to_clean;
+ eop = tx_ring->tx_buffer_info[i].next_to_watch;
+ eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+
+ while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
+ (count < tx_ring->work_limit)) {
+ bool cleaned = false;
+ for ( ; !cleaned; count++) {
+ struct sk_buff *skb;
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ cleaned = (i == eop);
+ skb = tx_buffer_info->skb;
+
+ if (cleaned && skb) {
+ unsigned int segs, bytecount;
+
+ /* gso_segs is currently only valid for tcp */
+ segs = skb_shinfo(skb)->gso_segs ?: 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) +
+ skb->len;
+ total_packets += segs;
+ total_bytes += bytecount;
+ }
+
+ ixgbevf_unmap_and_free_tx_resource(adapter,
+ tx_buffer_info);
+
+ tx_desc->wb.status = 0;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+ eop = tx_ring->tx_buffer_info[i].next_to_watch;
+ eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ }
+
+ tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(count && netif_carrier_ok(netdev) &&
+ (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+#ifdef HAVE_TX_MQ
+ if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
+ !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+ netif_wake_subqueue(netdev, tx_ring->queue_index);
+ ++adapter->restart_queue;
+ }
+#else
+ if (netif_queue_stopped(netdev) &&
+ !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+ netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ }
+#endif
+ }
+
+ if (adapter->detect_tx_hung) {
+ if (ixgbevf_check_tx_hang(adapter, tx_ring, i)) {
+ /* schedule immediate reset if we believe we hung */
+ printk(KERN_INFO
+ "tx hang %d detected, resetting adapter\n",
+ adapter->tx_timeout_count + 1);
+ ixgbevf_tx_timeout(adapter->netdev);
+ }
+ }
+
+ /* re-arm the interrupt */
+ if ((count >= tx_ring->work_limit) &&
+ (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
+ }
+
+ tx_ring->total_bytes += total_bytes;
+ tx_ring->total_packets += total_packets;
+
+ adapter->net_stats.tx_bytes += total_bytes;
+ adapter->net_stats.tx_packets += total_packets;
+
+ return (count < tx_ring->work_limit);
+}
+
+/**
+ * ixgbevf_receive_skb - Send a completed packet up the stack
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @status: hardware indication of status of receive
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ * @rx_desc: rx descriptor
+ **/
+static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
+ struct sk_buff *skb, u8 status,
+ struct ixgbevf_ring *ring,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ bool is_vlan = (status & IXGBE_RXD_STAT_VP);
+ u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
+ int ret;
+
+ if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
+ if (adapter->vlgrp && is_vlan)
+ vlan_gro_receive(&q_vector->napi,
+ adapter->vlgrp,
+ tag, skb);
+ else
+ napi_gro_receive(&q_vector->napi, skb);
+ } else {
+ if (adapter->vlgrp && is_vlan)
+ ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+ else
+ ret = netif_rx(skb);
+ }
+}
+
+/**
+ * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @adapter: address of board private structure
+ * @status_err: hardware indication of status of receive
+ * @skb: skb currently being received and modified
+ **/
+static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
+ u32 status_err, struct sk_buff *skb)
+{
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Rx csum disabled */
+ if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+ return;
+
+ /* if IP and error */
+ if ((status_err & IXGBE_RXD_STAT_IPCS) &&
+ (status_err & IXGBE_RXDADV_ERR_IPE)) {
+ adapter->hw_csum_rx_error++;
+ return;
+ }
+
+ if (!(status_err & IXGBE_RXD_STAT_L4CS))
+ return;
+
+ if (status_err & IXGBE_RXDADV_ERR_TCPE) {
+ adapter->hw_csum_rx_error++;
+ return;
+ }
+
+ /* It must be a TCP or UDP packet with a valid checksum */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ adapter->hw_csum_rx_good++;
+}
+
+/**
+ * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @adapter: address of board private structure
+ **/
+static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring,
+ int cleaned_count)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbevf_rx_buffer *bi;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
+
+ i = rx_ring->next_to_use;
+ bi = &rx_ring->rx_buffer_info[i];
+
+ while (cleaned_count--) {
+ rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+
+ if (!bi->page_dma &&
+ (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
+ if (!bi->page) {
+ bi->page = netdev_alloc_page(adapter->netdev);
+ if (!bi->page) {
+ adapter->alloc_rx_page_failed++;
+ goto no_buffers;
+ }
+ bi->page_offset = 0;
+ } else {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= (PAGE_SIZE / 2);
+ }
+
+ bi->page_dma = pci_map_page(pdev, bi->page,
+ bi->page_offset,
+ (PAGE_SIZE / 2),
+ PCI_DMA_FROMDEVICE);
+ }
+
+ skb = bi->skb;
+ if (!skb) {
+ skb = netdev_alloc_skb(adapter->netdev,
+ bufsz);
+
+ if (!skb) {
+ adapter->alloc_rx_buff_failed++;
+ goto no_buffers;
+ }
+
+ /*
+ * Make buffer alignment 2 beyond a 16 byte boundary;
+ * this will result in a 16 byte aligned IP header after
+ * the 14 byte MAC header is removed
+ */
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ bi->skb = skb;
+ }
+ if (!bi->dma) {
+ bi->dma = pci_map_single(pdev, skb->data,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
+ }
+ /* Refresh the desc even if buffer_addrs didn't change because
+ * each write-back erases this info. */
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+ } else {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ bi = &rx_ring->rx_buffer_info[i];
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
+ if (i-- == 0)
+ i = (rx_ring->count - 1);
+
+ ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
+ }
+}
+
+static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
+ u64 qmask)
+{
+ u32 mask;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ mask = (qmask & 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+}
+
+static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
+}
+
+static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+}
+
+static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
+ struct ixgbevf_ring *rx_ring,
+ int *work_done, int work_to_do)
+{
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct pci_dev *pdev = adapter->pdev;
+ union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
+ struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
+ struct sk_buff *skb;
+ unsigned int i;
+ u32 len, staterr;
+ u16 hdr_info;
+ bool cleaned = false;
+ int cleaned_count = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
+ while (staterr & IXGBE_RXD_STAT_DD) {
+ u32 upper_len = 0;
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
+ len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+ IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+ if (hdr_info & IXGBE_RXDADV_SPH)
+ adapter->rx_hdr_split++;
+ if (len > IXGBEVF_RX_HDR_SIZE)
+ len = IXGBEVF_RX_HDR_SIZE;
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+ } else {
+ len = le16_to_cpu(rx_desc->wb.upper.length);
+ }
+ cleaned = true;
+ skb = rx_buffer_info->skb;
+ prefetch(skb->data - NET_IP_ALIGN);
+ rx_buffer_info->skb = NULL;
+
+ if (rx_buffer_info->dma) {
+ pci_unmap_single(pdev, rx_buffer_info->dma,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
+ rx_buffer_info->dma = 0;
+ skb_put(skb, len);
+ }
+
+ if (upper_len) {
+ pci_unmap_page(pdev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ rx_buffer_info->page_dma = 0;
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buffer_info->page,
+ rx_buffer_info->page_offset,
+ upper_len);
+
+ if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
+ (page_count(rx_buffer_info->page) != 1))
+ rx_buffer_info->page = NULL;
+ else
+ get_page(rx_buffer_info->page);
+
+ skb->len += upper_len;
+ skb->data_len += upper_len;
+ skb->truesize += upper_len;
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+
+ next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ prefetch(next_rxd);
+ cleaned_count++;
+
+ next_buffer = &rx_ring->rx_buffer_info[i];
+
+ if (!(staterr & IXGBE_RXD_STAT_EOP)) {
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ rx_buffer_info->skb = next_buffer->skb;
+ rx_buffer_info->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ } else {
+ skb->next = next_buffer->skb;
+ skb->next->prev = skb;
+ }
+ adapter->non_eop_descs++;
+ goto next_desc;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
+ if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
+ ixgbevf_rx_checksum(adapter, staterr, skb);
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ /*
+ * Work around an issue where some types of VM-to-VM loopback
+ * packets do not get split correctly
+ */
+ if (staterr & IXGBE_RXD_STAT_LB) {
+ u32 header_fixup_len = skb->len - skb->data_len;
+ if (header_fixup_len < 14)
+ skb_push(skb, header_fixup_len);
+ }
+ skb->protocol = eth_type_trans(skb, adapter->netdev);
+
+ ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
+ adapter->netdev->last_rx = jiffies;
+
+next_desc:
+ rx_desc->wb.upper.status_error = 0;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
+ ixgbevf_alloc_rx_buffers(adapter, rx_ring,
+ cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+
+ rx_ring->next_to_clean = i;
+ cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+
+ if (cleaned_count)
+ ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+
+ rx_ring->total_packets += total_rx_packets;
+ rx_ring->total_bytes += total_rx_bytes;
+ adapter->net_stats.rx_bytes += total_rx_bytes;
+ adapter->net_stats.rx_packets += total_rx_packets;
+
+ return cleaned;
+}
+
+/**
+ * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function is optimized for cleaning one queue only on a single
+ * q_vector!!!
+ **/
+static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
+{
+ struct ixgbevf_q_vector *q_vector =
+ container_of(napi, struct ixgbevf_q_vector, napi);
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *rx_ring = NULL;
+ int work_done = 0;
+ long r_idx;
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = &(adapter->rx_ring[r_idx]);
+
+ ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+
+ /* If all Rx work done, exit the polling mode */
+ if (work_done < budget) {
+ napi_complete(napi);
+ if (adapter->itr_setting & 1)
+ ixgbevf_set_itr_msix(q_vector);
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
+ }
+
+ return work_done;
+}
+
+/**
+ * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean more than one rx queue associated with a
+ * q_vector.
+ **/
+static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
+{
+ struct ixgbevf_q_vector *q_vector =
+ container_of(napi, struct ixgbevf_q_vector, napi);
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *rx_ring = NULL;
+ int work_done = 0, i;
+ long r_idx;
+ u64 enable_mask = 0;
+
+ /* attempt to distribute budget to each queue fairly, but don't allow
+ * the budget to go below 1 because we'll exit polling */
+ budget /= (q_vector->rxr_count ?: 1);
+ budget = max(budget, 1);
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+ enable_mask |= rx_ring->v_idx;
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+#ifndef HAVE_NETDEV_NAPI_LIST
+ if (!netif_running(adapter->netdev))
+ work_done = 0;
+
+#endif
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = &(adapter->rx_ring[r_idx]);
+
+ /* If all Rx work done, exit the polling mode */
+ if (work_done < budget) {
+ napi_complete(napi);
+ if (adapter->itr_setting & 1)
+ ixgbevf_set_itr_msix(q_vector);
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable_queues(adapter, enable_mask);
+ }
+
+ return work_done;
+}
+
+/**
+ * ixgbevf_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
+ * interrupts.
+ **/
+static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbevf_q_vector *q_vector;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, j, q_vectors, v_idx, r_idx;
+ u32 mask;
+
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ /*
+ * Populate the IVAR table and set the ITR values to the
+ * corresponding register.
+ */
+ for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ q_vector = adapter->q_vector[v_idx];
+ /* XXX for_each_bit(...) */
+ r_idx = find_first_bit(q_vector->rxr_idx,
+ adapter->num_rx_queues);
+
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ j = adapter->rx_ring[r_idx].reg_idx;
+ ixgbevf_set_ivar(adapter, 0, j, v_idx);
+ r_idx = find_next_bit(q_vector->rxr_idx,
+ adapter->num_rx_queues,
+ r_idx + 1);
+ }
+ r_idx = find_first_bit(q_vector->txr_idx,
+ adapter->num_tx_queues);
+
+ for (i = 0; i < q_vector->txr_count; i++) {
+ j = adapter->tx_ring[r_idx].reg_idx;
+ ixgbevf_set_ivar(adapter, 1, j, v_idx);
+ r_idx = find_next_bit(q_vector->txr_idx,
+ adapter->num_tx_queues,
+ r_idx + 1);
+ }
+
+ /* if this is a tx only vector halve the interrupt rate */
+ if (q_vector->txr_count && !q_vector->rxr_count)
+ q_vector->eitr = (adapter->eitr_param >> 1);
+ else if (q_vector->rxr_count)
+ /* rx only */
+ q_vector->eitr = adapter->eitr_param;
+
+ ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
+ }
+
+ ixgbevf_set_ivar(adapter, -1, 1, v_idx);
+
+ /* set up to autoclear timer, and the vectors */
+ mask = IXGBE_EIMS_ENABLE_MASK;
+ mask &= ~IXGBE_EIMS_OTHER;
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
+}
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+/**
+ * ixgbevf_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @eitr: eitr setting (ints per sec) to give last timeslice
+ * @itr_setting: current throttle rate in ints/second
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ **/
+static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
+ u32 eitr, u8 itr_setting,
+ int packets, int bytes)
+{
+ unsigned int retval = itr_setting;
+ u32 timepassed_us;
+ u64 bytes_perint;
+
+ if (packets == 0)
+ goto update_itr_done;
+
+ /* simple throttle rate management
+ * 0-20MB/s lowest (100000 ints/s)
+ * 20-100MB/s low (20000 ints/s)
+ * 100-1249MB/s bulk (8000 ints/s)
+ */
+ /* what was last interrupt timeslice? */
+ timepassed_us = 1000000/eitr;
+ bytes_perint = bytes / timepassed_us; /* bytes/usec */
+
+ switch (itr_setting) {
+ case lowest_latency:
+ if (bytes_perint > adapter->eitr_low)
+ retval = low_latency;
+ break;
+ case low_latency:
+ if (bytes_perint > adapter->eitr_high)
+ retval = bulk_latency;
+ else if (bytes_perint <= adapter->eitr_low)
+ retval = lowest_latency;
+ break;
+ case bulk_latency:
+ if (bytes_perint <= adapter->eitr_high)
+ retval = low_latency;
+ break;
+ }
+
+update_itr_done:
+ return retval;
+}
+
+/**
+ * ixgbevf_write_eitr - write VTEITR register in hardware specific way
+ * @adapter: pointer to adapter struct
+ * @v_idx: vector index into q_vector array
+ * @itr_reg: new value in ints/s; it is converted to register format here
+ *
+ * This function is made to be called by ethtool and by the driver
+ * when it needs to update VTEITR registers at runtime. Hardware
+ * specific quirks/differences are taken care of here.
+ */
+static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
+ u32 itr_reg)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);
+
+ /*
+ * set the WDIS bit to not clear the timer bits and cause an
+ * immediate assertion of the interrupt
+ */
+ itr_reg |= IXGBE_EITR_CNT_WDIS;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
+}
+
+static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
+{
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ u32 new_itr;
+ u8 current_itr, ret_itr;
+ int i, r_idx, v_idx = q_vector->v_idx;
+ struct ixgbevf_ring *rx_ring, *tx_ring;
+
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+ tx_ring = &(adapter->tx_ring[r_idx]);
+ ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
+ q_vector->tx_itr,
+ tx_ring->total_packets,
+ tx_ring->total_bytes);
+ /* if the result for this queue would decrease interrupt
+ * rate for this vector then use that result */
+ q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
+ q_vector->tx_itr - 1 : ret_itr);
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx + 1);
+ }
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
+ q_vector->rx_itr,
+ rx_ring->total_packets,
+ rx_ring->total_bytes);
+ /* if the result for this queue would decrease interrupt
+ * rate for this vector then use that result */
+ q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
+ q_vector->rx_itr - 1 : ret_itr);
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+ current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = 100000;
+ break;
+ case low_latency:
+ new_itr = 20000; /* aka hwitr = ~200 */
+ break;
+ case bulk_latency:
+ default:
+ new_itr = 8000;
+ break;
+ }
+
+ if (new_itr != q_vector->eitr) {
+ /* save the algorithm value here, not the smoothed one */
+ q_vector->eitr = new_itr;
+ /* do an exponential smoothing */
+ new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+ /* ixgbevf_write_eitr converts from ints/s to register format */
+ ixgbevf_write_eitr(adapter, v_idx, new_itr);
+ }
+
+ return;
+}
+
+static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 eicr;
+ u32 msg;
+
+ eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
+
+ hw->mbx.ops.read(hw, &msg, 1);
+
+ if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 10));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
+{
+ struct ixgbevf_q_vector *q_vector = data;
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *tx_ring;
+ int i, r_idx;
+
+ if (!q_vector->txr_count)
+ return IRQ_HANDLED;
+
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+ tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring->total_bytes = 0;
+ tx_ring->total_packets = 0;
+ ixgbevf_clean_tx_irq(adapter, tx_ring);
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx + 1);
+ }
+
+ if (adapter->itr_setting & 1)
+ ixgbevf_set_itr_msix(q_vector);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
+ * @irq: unused
+ * @data: pointer to our q_vector struct for this interrupt vector
+ **/
+static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
+{
+ struct ixgbevf_q_vector *q_vector = data;
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbevf_ring *rx_ring;
+ int r_idx;
+ int i;
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring->total_bytes = 0;
+ rx_ring->total_packets = 0;
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+ if (!q_vector->rxr_count)
+ return IRQ_HANDLED;
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ /* disable interrupts on this vector only */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
+{
+ ixgbevf_msix_clean_rx(irq, data);
+ ixgbevf_msix_clean_tx(irq, data);
+
+ return IRQ_HANDLED;
+}
+
+static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
+ int r_idx)
+{
+ struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
+
+ set_bit(r_idx, q_vector->rxr_idx);
+ q_vector->rxr_count++;
+ a->rx_ring[r_idx].v_idx = 1 << v_idx;
+}
+
+static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
+ int t_idx)
+{
+ struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
+
+ set_bit(t_idx, q_vector->txr_idx);
+ q_vector->txr_count++;
+ a->tx_ring[t_idx].v_idx = 1 << v_idx;
+}
+
+/**
+ * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code. Ideally, we'd have
+ * one vector per ring/queue, but on a constrained vector budget, we
+ * group the rings as "efficiently" as possible. You would add new
+ * mapping configurations in here.
+ **/
+static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
+{
+ int q_vectors;
+ int v_start = 0;
+ int rxr_idx = 0, txr_idx = 0;
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int i, j;
+ int rqpv, tqpv;
+ int err = 0;
+
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ /*
+ * The ideal configuration...
+ * We have enough vectors to map one per queue.
+ */
+ if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
+ for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+ map_vector_to_rxq(adapter, v_start, rxr_idx);
+
+ for (; txr_idx < txr_remaining; v_start++, txr_idx++)
+ map_vector_to_txq(adapter, v_start, txr_idx);
+ goto out;
+ }
+
+ /*
+ * If we don't have enough vectors for a 1-to-1
+ * mapping, we'll have to group them so there are
+ * multiple queues per vector.
+ */
+ /* Re-adjusting *qpv takes care of the remainder. */
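+ /*
+ * For example (hypothetical counts), 3 Rx queues shared over 2 vectors:
+ * DIV_ROUND_UP(3, 2) = 2 rings on the first vector, then
+ * DIV_ROUND_UP(1, 1) = 1 ring on the second.
+ */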
+ for (i = v_start; i < q_vectors; i++) {
+ rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
+ for (j = 0; j < rqpv; j++) {
+ map_vector_to_rxq(adapter, i, rxr_idx);
+ rxr_idx++;
+ rxr_remaining--;
+ }
+ }
+ for (i = v_start; i < q_vectors; i++) {
+ tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
+ for (j = 0; j < tqpv; j++) {
+ map_vector_to_txq(adapter, i, txr_idx);
+ txr_idx++;
+ txr_remaining--;
+ }
+ }
+
+out:
+ return err;
+}
+
+/**
+ * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ irqreturn_t (*handler)(int, void *);
+ int i, vector, q_vectors, err;
+ int ri = 0, ti = 0;
+
+ /* Decrement for Other and TCP Timer vectors */
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
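+ /*
+ * Pick the per-vector handler: vectors servicing both Rx and Tx rings get
+ * the combined handler, Rx-only and Tx-only vectors get their dedicated
+ * ones, and vectors with no rings map to NULL and are skipped below.
+ */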
+#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
+ ? &ixgbevf_msix_clean_many : \
+ (_v)->rxr_count ? &ixgbevf_msix_clean_rx : \
+ (_v)->txr_count ? &ixgbevf_msix_clean_tx : \
+ NULL)
+ for (vector = 0; vector < q_vectors; vector++) {
+ handler = SET_HANDLER(adapter->q_vector[vector]);
+
+ if (handler == &ixgbevf_msix_clean_rx) {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "rx", ri++);
+ } else if (handler == &ixgbevf_msix_clean_tx) {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "tx", ti++);
+ } else if (handler == &ixgbevf_msix_clean_many) {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "TxRx", vector);
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
+ err = request_irq(adapter->msix_entries[vector].vector,
+ handler, 0, adapter->name[vector],
+ adapter->q_vector[vector]);
+ if (err) {
+ hw_dbg(&adapter->hw,
+ "request_irq failed for MSIX interrupt "
+ "Error: %d\n", err);
+ goto free_queue_irqs;
+ }
+ }
+
+ sprintf(adapter->name[vector], "%s:mbx", netdev->name);
+ err = request_irq(adapter->msix_entries[vector].vector,
+ &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
+ if (err) {
+ hw_dbg(&adapter->hw,
+ "request_irq for msix_mbx failed: %d\n", err);
+ goto free_queue_irqs;
+ }
+
+ return 0;
+
+free_queue_irqs:
+ for (i = vector - 1; i >= 0; i--)
+ free_irq(adapter->msix_entries[i].vector,
+ adapter->q_vector[i]);
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ return err;
+}
+
+static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
+{
+ int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (i = 0; i < q_vectors; i++) {
+ struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
+ bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
+ bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
+ q_vector->rxr_count = 0;
+ q_vector->txr_count = 0;
+ q_vector->eitr = adapter->eitr_param;
+ }
+}
+
+/**
+ * ixgbevf_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
+{
+ int err = 0;
+
+ err = ixgbevf_request_msix_irqs(adapter);
+
+ if (err)
+ hw_dbg(&adapter->hw,
+ "request_irq failed, Error %d\n", err);
+
+ return err;
+}
+
+static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i, q_vectors;
+
+ q_vectors = adapter->num_msix_vectors;
+
+ i = q_vectors - 1;
+
+ free_irq(adapter->msix_entries[i].vector, netdev);
+ i--;
+
+ for (; i >= 0; i--) {
+ free_irq(adapter->msix_entries[i].vector,
+ adapter->q_vector[i]);
+ }
+
+ ixgbevf_reset_q_vectors(adapter);
+}
+
+/**
+ * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
+{
+ int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ for (i = 0; i < adapter->num_msix_vectors; i++)
+ synchronize_irq(adapter->msix_entries[i].vector);
+}
+
+/**
+ * ixgbevf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
+ bool queues, bool flush)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 mask;
+ u64 qmask;
+
+ mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+ qmask = ~0;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+ if (queues)
+ ixgbevf_irq_enable_queues(adapter, qmask);
+
+ if (flush)
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
+{
+ u64 tdba;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 i, j, tdlen, txctrl;
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbevf_ring *ring = &adapter->tx_ring[i];
+ j = ring->reg_idx;
+ tdba = ring->dma;
+ tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
+ (tdba & DMA_BIT_MASK(32)));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
+ adapter->tx_ring[i].head = IXGBE_VFTDH(j);
+ adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
+ /* Disable Tx Head Writeback RO bit, since this hoses
+ * bookkeeping if things aren't delivered in order.
+ */
+ txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
+ txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
+ }
+}
+
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
+
+static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
+{
+ struct ixgbevf_ring *rx_ring;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 srrctl;
+
+ rx_ring = &adapter->rx_ring[index];
+
+ srrctl = IXGBE_SRRCTL_DROP_EN;
+
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ u16 bufsz = IXGBEVF_RXBUFFER_2048;
+ /* grow the amount we can receive on large page machines */
+ if (bufsz < (PAGE_SIZE / 2))
+ bufsz = (PAGE_SIZE / 2);
+ /* cap the bufsz at our largest descriptor size */
+ bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);
+
+ srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
+ IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK);
+ } else {
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+ if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
+ srrctl |= IXGBEVF_RXBUFFER_2048 >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= rx_ring->rx_buf_len >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
+}
+
+/**
+ * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
+{
+ u64 rdba;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ int i, j;
+ u32 rdlen;
+ int rx_buf_len;
+
+ /* Decide whether to use packet split mode or not */
+ if (netdev->mtu > ETH_DATA_LEN) {
+ if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+ } else {
+ if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ }
+
+ /* Set the RX buffer length according to the mode */
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ /* PSRTYPE must be initialized in 82599 */
+ u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_IPV6HDR |
+ IXGBE_PSRTYPE_L2HDR;
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+ rx_buf_len = IXGBEVF_RX_HDR_SIZE;
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+ if (netdev->mtu <= ETH_DATA_LEN)
+ rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ else
+ rx_buf_len = ALIGN(max_frame, 1024);
+ }
+
+ rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rdba = adapter->rx_ring[i].dma;
+ j = adapter->rx_ring[i].reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
+ (rdba & DMA_BIT_MASK(32)));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
+ adapter->rx_ring[i].head = IXGBE_VFRDH(j);
+ adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
+ adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+
+ ixgbevf_configure_srrctl(adapter, j);
+ }
+}
+
+static void ixgbevf_vlan_rx_register(struct net_device *netdev,
+ struct vlan_group *grp)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, j;
+ u32 ctrl;
+
+ adapter->vlgrp = grp;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ j = adapter->rx_ring[i].reg_idx;
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+ ctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), ctrl);
+ }
+}
+
+static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *v_netdev;
+
+ /* add VID to filter table */
+ if (hw->mac.ops.set_vfta)
+ hw->mac.ops.set_vfta(hw, vid, 0, true);
+ /*
+ * Copy feature flags from netdev to the vlan netdev for this vid.
+ * This allows things like TSO to bubble down to our vlan device.
+ */
+ v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
+ v_netdev->features |= adapter->netdev->features;
+ vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
+}
+
+static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_disable(adapter);
+
+ vlan_group_set_device(adapter->vlgrp, vid, NULL);
+
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable(adapter, true, true);
+
+ /* remove VID from filter table */
+ if (hw->mac.ops.set_vfta)
+ hw->mac.ops.set_vfta(hw, vid, 0, false);
+}
+
+static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
+{
+ ixgbevf_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+
+ if (adapter->vlgrp) {
+ u16 vid;
+ for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+ if (!vlan_group_get_device(adapter->vlgrp, vid))
+ continue;
+ ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
+ }
+ }
+}
+
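+/*
+ * Iterator callback handed to update_mc_addr_list by ixgbevf_set_rx_mode:
+ * returns the current multicast address and advances *mc_addr_ptr to the
+ * next entry in the dev_mc_list chain (or NULL at the end); the VF always
+ * reports VMDq pool 0.
+ */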
+static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+ u32 *vmdq)
+{
+ struct dev_mc_list *mc_ptr;
+ u8 *addr = *mc_addr_ptr;
+ *vmdq = 0;
+
+ mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
+ if (mc_ptr->next)
+ *mc_addr_ptr = mc_ptr->next->dmi_addr;
+ else
+ *mc_addr_ptr = NULL;
+
+ return addr;
+}
+
+/**
+ * ixgbevf_set_rx_mode - Multicast set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_method entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast mode.
+ **/
+static void ixgbevf_set_rx_mode(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 *addr_list = NULL;
+ int addr_count = 0;
+
+ /* reprogram multicast list */
+ addr_count = netdev_mc_count(netdev);
+ if (addr_count)
+ addr_list = netdev->mc_list->dmi_addr;
+ if (hw->mac.ops.update_mc_addr_list)
+ hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
+ ixgbevf_addr_list_itr);
+}
+
+static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
+{
+ int q_idx;
+ struct ixgbevf_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ struct napi_struct *napi;
+ q_vector = adapter->q_vector[q_idx];
+ if (!q_vector->rxr_count)
+ continue;
+ napi = &q_vector->napi;
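+ /* vectors servicing more than one Rx ring need the multi-ring poll */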
+ if (q_vector->rxr_count > 1)
+ napi->poll = &ixgbevf_clean_rxonly_many;
+
+ napi_enable(napi);
+ }
+}
+
+static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
+{
+ int q_idx;
+ struct ixgbevf_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ q_vector = adapter->q_vector[q_idx];
+ if (!q_vector->rxr_count)
+ continue;
+ napi_disable(&q_vector->napi);
+ }
+}
+
+static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ ixgbevf_set_rx_mode(netdev);
+
+ ixgbevf_restore_vlan(adapter);
+
+ ixgbevf_configure_tx(adapter);
+ ixgbevf_configure_rx(adapter);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbevf_ring *ring = &adapter->rx_ring[i];
+ ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
+ ring->next_to_use = ring->count - 1;
+ writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
+ }
+}
+
+#define IXGBE_MAX_RX_DESC_POLL 10
+static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+ int rxr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int j = adapter->rx_ring[rxr].reg_idx;
+ int k;
+
+ for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
+ if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
+ break;
+ else
+ msleep(1);
+ }
+ if (k >= IXGBE_MAX_RX_DESC_POLL) {
+ hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
+ "not set within the polling period\n", rxr);
+ }
+
+ ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
+ (adapter->rx_ring[rxr].count - 1));
+}
+
+static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, j = 0;
+ int num_rx_rings = adapter->num_rx_queues;
+ u32 txdctl, rxdctl;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ j = adapter->tx_ring[i].reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+ /* enable WTHRESH=8 descriptors, to encourage burst writeback */
+ txdctl |= (8 << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ j = adapter->tx_ring[i].reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
+ }
+
+ for (i = 0; i < num_rx_rings; i++) {
+ j = adapter->rx_ring[i].reg_idx;
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+ rxdctl |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
+ ixgbevf_rx_desc_queue_enable(adapter, i);
+ }
+
+ ixgbevf_configure_msix(adapter);
+
+ if (hw->mac.ops.set_rar) {
+ if (is_valid_ether_addr(hw->mac.addr))
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+ else
+ hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
+ }
+
+ clear_bit(__IXGBEVF_DOWN, &adapter->state);
+ ixgbevf_napi_enable_all(adapter);
+
+ /* enable transmits */
+ netif_tx_start_all_queues(netdev);
+
+ /* bring the link up in the watchdog, this could race with our first
+ * link up interrupt but shouldn't be a problem */
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ mod_timer(&adapter->watchdog_timer, jiffies);
+ return 0;
+}
+
+int ixgbevf_up(struct ixgbevf_adapter *adapter)
+{
+ int err;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ ixgbevf_configure(adapter);
+
+ err = ixgbevf_up_complete(adapter);
+
+ /* clear any pending interrupts, may auto mask */
+ IXGBE_READ_REG(hw, IXGBE_VTEICR);
+
+ ixgbevf_irq_enable(adapter, true, true);
+
+ return err;
+}
+
+/**
+ * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
+ **/
+static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+ unsigned int i;
+
+ if (!rx_ring->rx_buffer_info)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ struct ixgbevf_rx_buffer *rx_buffer_info;
+
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ if (rx_buffer_info->dma) {
+ pci_unmap_single(pdev, rx_buffer_info->dma,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
+ rx_buffer_info->dma = 0;
+ }
+ if (rx_buffer_info->skb) {
+ struct sk_buff *skb = rx_buffer_info->skb;
+ rx_buffer_info->skb = NULL;
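+ /*
+ * the buffer may hold a chain of skbs linked via
+ * skb->prev; walk the chain and free each one
+ */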
+ do {
+ struct sk_buff *this = skb;
+ skb = skb->prev;
+ dev_kfree_skb(this);
+ } while (skb);
+ }
+ if (!rx_buffer_info->page)
+ continue;
+ pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
+ PCI_DMA_FROMDEVICE);
+ rx_buffer_info->page_dma = 0;
+ put_page(rx_buffer_info->page);
+ rx_buffer_info->page = NULL;
+ rx_buffer_info->page_offset = 0;
+ }
+
+ size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ if (rx_ring->head)
+ writel(0, adapter->hw.hw_addr + rx_ring->head);
+ if (rx_ring->tail)
+ writel(0, adapter->hw.hw_addr + rx_ring->tail);
+}
+
+/**
+ * ixgbevf_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ if (!tx_ring->tx_buffer_info)
+ return;
+
+ /* Free all the Tx ring sk_buffs */
+
+ for (i = 0; i < tx_ring->count; i++) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ }
+
+ size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_buffer_info, 0, size);
+
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ if (tx_ring->head)
+ writel(0, adapter->hw.hw_addr + tx_ring->head);
+ if (tx_ring->tail)
+ writel(0, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+/**
+ * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+void ixgbevf_down(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 txdctl;
+ int i, j;
+
+ /* signal that we are down to the interrupt handler */
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+ /* disable receives */
+
+ netif_tx_disable(netdev);
+
+ msleep(10);
+
+ netif_tx_stop_all_queues(netdev);
+
+ ixgbevf_irq_disable(adapter);
+
+ ixgbevf_napi_disable_all(adapter);
+
+ del_timer_sync(&adapter->watchdog_timer);
+ /* can't call flush scheduled work here because it can deadlock
+ * if linkwatch_event tries to acquire the rtnl_lock which we are
+ * holding */
+ while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
+ msleep(1);
+
+ /* disable transmits in the hardware now that interrupts are off */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ j = adapter->tx_ring[i].reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
+ (txdctl & ~IXGBE_TXDCTL_ENABLE));
+ }
+
+ netif_carrier_off(netdev);
+
+ if (!pci_channel_offline(adapter->pdev))
+ ixgbevf_reset(adapter);
+
+ ixgbevf_clean_all_tx_rings(adapter);
+ ixgbevf_clean_all_rx_rings(adapter);
+}
+
+void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ WARN_ON(in_interrupt());
+
+ while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
+ msleep(1);
+
+ /*
+ * Check if PF is up before re-init. If not then skip until
+ * later when the PF is up and ready to service requests from
+ * the VF via mailbox. If the VF is up and running then the
+ * watchdog task will continue to schedule reset tasks until
+ * the PF is up and running.
+ */
+ if (!hw->mac.ops.reset_hw(hw)) {
+ ixgbevf_down(adapter);
+ ixgbevf_up(adapter);
+ }
+
+ clear_bit(__IXGBEVF_RESETTING, &adapter->state);
+}
+
+void ixgbevf_reset(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+
+ if (hw->mac.ops.reset_hw(hw))
+ hw_dbg(hw, "PF still resetting\n");
+ else
+ hw->mac.ops.init_hw(hw);
+
+ if (is_valid_ether_addr(adapter->hw.mac.addr)) {
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
+ }
+}
+
+static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
+ int vectors)
+{
+ int err, vector_threshold;
+
+ /* We'll want at least 3 (vector_threshold):
+ * 1) TxQ[0] Cleanup
+ * 2) RxQ[0] Cleanup
+ * 3) Other (Link Status Change, etc.)
+ */
+ vector_threshold = MIN_MSIX_COUNT;
+
+ /* The more we get, the more we will assign to Tx/Rx Cleanup
+ * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+ * Right now, we simply care about how many we'll get; we'll
+ * set them up later while requesting irq's.
+ */
+ while (vectors >= vector_threshold) {
+ err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+ vectors);
+ if (!err) /* Success in acquiring all requested vectors. */
+ break;
+ else if (err < 0)
+ vectors = 0; /* Nasty failure, quit now */
+ else /* err == number of vectors we should try again with */
+ vectors = err;
+ }
+
+ if (vectors < vector_threshold) {
+ /* Can't allocate enough MSI-X interrupts? Oh well.
+ * This just means we'll go with either a single MSI
+ * vector or fall back to legacy interrupts.
+ */
+ hw_dbg(&adapter->hw,
+ "Unable to allocate MSI-X interrupts\n");
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else {
+ /*
+ * Adjust for only the vectors we'll use, which is minimum
+ * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = vectors;
+ }
+}
+
+/**
+ * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
+ * @adapter: board private structure to initialize
+ *
+ * This is the top level queue allocation routine. The order here is very
+ * important, starting with the largest set of features turned on at once,
+ * and ending with the smallest set of features. This way large combinations
+ * can be allocated if they're turned on, and smaller combinations are the
+ * fallthrough conditions.
+ *
+ **/
+static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
+{
+ /* Start with base case */
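+ /* the VF currently runs a single Tx/Rx queue pair */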
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_queues_per_pool = 1;
+}
+
+/**
+ * ixgbevf_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time.
+ **/
+static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ adapter->tx_ring = kcalloc(adapter->num_tx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!adapter->tx_ring)
+ goto err_tx_ring_allocation;
+
+ adapter->rx_ring = kcalloc(adapter->num_rx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!adapter->rx_ring)
+ goto err_rx_ring_allocation;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ adapter->tx_ring[i].count = adapter->tx_ring_count;
+ adapter->tx_ring[i].queue_index = i;
+ adapter->tx_ring[i].reg_idx = i;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ adapter->rx_ring[i].count = adapter->rx_ring_count;
+ adapter->rx_ring[i].queue_index = i;
+ adapter->rx_ring[i].reg_idx = i;
+ }
+
+ return 0;
+
+err_rx_ring_allocation:
+ kfree(adapter->tx_ring);
+err_tx_ring_allocation:
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
+{
+ int err = 0;
+ int vector, v_budget;
+
+ /*
+ * It's easy to be greedy for MSI-X vectors, but it really
+ * doesn't do us much good if we have a lot more vectors
+ * than CPUs. So let's be conservative and only ask for
+ * (roughly) twice the number of vectors as there are CPUs.
+ */
+ v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
+ (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+
+ /* A failure in MSI-X entry allocation isn't fatal, but it does
+ * mean we disable MSI-X capabilities of the adapter. */
+ adapter->msix_entries = kcalloc(v_budget,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!adapter->msix_entries) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (vector = 0; vector < v_budget; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+ ixgbevf_acquire_msix_vectors(adapter, v_budget);
+
+out:
+ return err;
+}
+
+/**
+ * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ struct ixgbevf_q_vector *q_vector;
+ int napi_vectors;
+ int (*poll)(struct napi_struct *, int);
+
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ napi_vectors = adapter->num_rx_queues;
+ poll = &ixgbevf_clean_rxonly;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
+ if (!q_vector)
+ goto err_out;
+ q_vector->adapter = adapter;
+ q_vector->v_idx = q_idx;
+ q_vector->eitr = adapter->eitr_param;
+ if (q_idx < napi_vectors)
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ (*poll), 64);
+ adapter->q_vector[q_idx] = q_vector;
+ }
+
+ return 0;
+
+err_out:
+ while (q_idx) {
+ q_idx--;
+ q_vector = adapter->q_vector[q_idx];
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ adapter->q_vector[q_idx] = NULL;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ int napi_vectors;
+
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ napi_vectors = adapter->num_rx_queues;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
+
+ adapter->q_vector[q_idx] = NULL;
+ if (q_idx < napi_vectors)
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ }
+}
+
+/**
+ * ixgbevf_reset_interrupt_capability - Reset MSIX setup
+ * @adapter: board private structure
+ *
+ **/
+static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
+{
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+
+ return;
+}
+
+/**
+ * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
+ * @adapter: board private structure to initialize
+ *
+ **/
+static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
+{
+ int err;
+
+ /* Number of supported queues */
+ ixgbevf_set_num_queues(adapter);
+
+ err = ixgbevf_set_interrupt_capability(adapter);
+ if (err) {
+ hw_dbg(&adapter->hw,
+ "Unable to setup interrupt capabilities\n");
+ goto err_set_interrupt;
+ }
+
+ err = ixgbevf_alloc_q_vectors(adapter);
+ if (err) {
+ hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
+ "vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ err = ixgbevf_alloc_queues(adapter);
+ if (err) {
+ printk(KERN_ERR "Unable to allocate memory for queues\n");
+ goto err_alloc_queues;
+ }
+
+ hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
+ "Tx Queue count = %u\n",
+ (adapter->num_rx_queues > 1) ? "Enabled" :
+ "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
+
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+ return 0;
+err_alloc_queues:
+ ixgbevf_free_q_vectors(adapter);
+err_alloc_q_vectors:
+ ixgbevf_reset_interrupt_capability(adapter);
+err_set_interrupt:
+ return err;
+}
+
+/**
+ * ixgbevf_sw_init - Initialize general software structures
+ * (struct ixgbevf_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * ixgbevf_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+
+ /* PCI config space info */
+
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+
+ hw->mbx.ops.init_params(hw);
+ hw->mac.max_tx_queues = MAX_TX_QUEUES;
+ hw->mac.max_rx_queues = MAX_RX_QUEUES;
+ err = hw->mac.ops.reset_hw(hw);
+ if (err) {
+ dev_info(&pdev->dev,
+ "PF still in reset state, assigning new address\n");
+ random_ether_addr(hw->mac.addr);
+ } else {
+ err = hw->mac.ops.init_hw(hw);
+ if (err) {
+ printk(KERN_ERR "init_shared_code failed: %d\n", err);
+ goto out;
+ }
+ }
+
+ /* Enable dynamic interrupt throttling rates */
+ adapter->eitr_param = 20000;
+ adapter->itr_setting = 1;
+
+ /* set defaults for eitr in MegaBytes */
+ adapter->eitr_low = 10;
+ adapter->eitr_high = 20;
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
+ adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
+
+ /* enable rx csum by default */
+ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+out:
+ return err;
+}
+
+static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
+ adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
+ adapter->stats.last_vfgorc |=
+ (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
+ adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
+ adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
+ adapter->stats.last_vfgotc |=
+ (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
+ adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
+
+ adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
+ adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
+ adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
+ adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
+ adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
+}
+
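+/*
+ * The VF statistics registers are 32 bits wide (36 bits for the octet
+ * counters, split across _LSB/_MSB) and roll over; these helpers detect a
+ * rollover by comparing against the last value read and carry it into the
+ * upper bits of the 64-bit software counter.
+ */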
+#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
+ { \
+ u32 current_counter = IXGBE_READ_REG(hw, reg); \
+ if (current_counter < last_counter) \
+ counter += 0x100000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFFF00000000LL; \
+ counter |= current_counter; \
+ }
+
+#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+ { \
+ u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
+ u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
+ u64 current_counter = (current_counter_msb << 32) | \
+ current_counter_lsb; \
+ if (current_counter < last_counter) \
+ counter += 0x1000000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFF000000000LL; \
+ counter |= current_counter; \
+ }
+/**
+ * ixgbevf_update_stats - Update the board statistics counters.
+ * @adapter: board private structure
+ **/
+void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
+ adapter->stats.vfgprc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
+ adapter->stats.vfgptc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
+ adapter->stats.last_vfgorc,
+ adapter->stats.vfgorc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
+ adapter->stats.last_vfgotc,
+ adapter->stats.vfgotc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
+ adapter->stats.vfmprc);
+
+ /* Fill out the OS statistics structure */
+ adapter->net_stats.multicast = adapter->stats.vfmprc -
+ adapter->stats.base_vfmprc;
+}
+
+/**
+ * ixgbevf_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void ixgbevf_watchdog(unsigned long data)
+{
+ struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 eics = 0;
+ int i;
+
+ /*
+ * Do the watchdog outside of interrupt context due to the lovely
+ * delays that some of the newer hardware requires
+ */
+
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+ goto watchdog_short_circuit;
+
+ /* get one bit for every active tx/rx interrupt vector */
+ for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+ struct ixgbevf_q_vector *qv = adapter->q_vector[i];
+ if (qv->rxr_count || qv->txr_count)
+ eics |= (1 << i);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
+
+watchdog_short_circuit:
+ schedule_work(&adapter->watchdog_task);
+}
+
+/**
+ * ixgbevf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void ixgbevf_tx_timeout(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+}
+
+static void ixgbevf_reset_task(struct work_struct *work)
+{
+ struct ixgbevf_adapter *adapter;
+ adapter = container_of(work, struct ixgbevf_adapter, reset_task);
+
+ /* If we're already down or resetting, just bail */
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+ test_bit(__IXGBEVF_RESETTING, &adapter->state))
+ return;
+
+ adapter->tx_timeout_count++;
+
+ ixgbevf_reinit_locked(adapter);
+}
+
+/**
+ * ixgbevf_watchdog_task - worker thread to bring link up
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbevf_watchdog_task(struct work_struct *work)
+{
+ struct ixgbevf_adapter *adapter = container_of(work,
+ struct ixgbevf_adapter,
+ watchdog_task);
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = adapter->link_speed;
+ bool link_up = adapter->link_up;
+
+ adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
+
+ /*
+ * Always check the link on the watchdog because we have
+ * no LSC interrupt
+ */
+ if (hw->mac.ops.check_link) {
+ if ((hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false)) != 0) {
+ adapter->link_up = link_up;
+ adapter->link_speed = link_speed;
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ schedule_work(&adapter->reset_task);
+ goto pf_has_reset;
+ }
+ } else {
+ /* always assume link is up, if no check link
+ * function */
+ link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ link_up = true;
+ }
+ adapter->link_up = link_up;
+ adapter->link_speed = link_speed;
+
+ if (link_up) {
+ if (!netif_carrier_ok(netdev)) {
+ hw_dbg(&adapter->hw, "NIC Link is Up %s, ",
+ ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ "10 Gbps" : "1 Gbps"));
+ netif_carrier_on(netdev);
+ netif_tx_wake_all_queues(netdev);
+ } else {
+ /* Force detection of hung controller */
+ adapter->detect_tx_hung = true;
+ }
+ } else {
+ adapter->link_up = false;
+ adapter->link_speed = 0;
+ if (netif_carrier_ok(netdev)) {
+ hw_dbg(&adapter->hw, "NIC Link is Down\n");
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ }
+ }
+
+pf_has_reset:
+ ixgbevf_update_stats(adapter);
+
+ /* Force detection of hung controller every watchdog period */
+ adapter->detect_tx_hung = true;
+
+ /* Reset the timer */
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + (2 * HZ)));
+
+ adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
+}
+
+/**
+ * ixgbevf_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ ixgbevf_clean_tx_ring(adapter, tx_ring);
+
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ if (adapter->tx_ring[i].desc)
+ ixgbevf_free_tx_resources(adapter,
+ &adapter->tx_ring[i]);
+
+}
+
+/**
+ * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
+ tx_ring->tx_buffer_info = vmalloc(size);
+ if (!tx_ring->tx_buffer_info)
+ goto err;
+ memset(tx_ring->tx_buffer_info, 0, size);
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+ tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+ &tx_ring->dma);
+ if (!tx_ring->desc)
+ goto err;
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ tx_ring->work_limit = tx_ring->count;
+ return 0;
+
+err:
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+ hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
+ "descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ if (!err)
+ continue;
+ hw_dbg(&adapter->hw,
+ "Allocation for Tx Queue %u failed\n", i);
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
+ rx_ring->rx_buffer_info = vmalloc(size);
+ if (!rx_ring->rx_buffer_info) {
+ hw_dbg(&adapter->hw,
+ "Unable to vmalloc buffer memory for "
+ "the receive descriptor ring\n");
+ goto alloc_failed;
+ }
+ memset(rx_ring->rx_buffer_info, 0, size);
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
+ &rx_ring->dma);
+
+ if (!rx_ring->desc) {
+ hw_dbg(&adapter->hw,
+ "Unable to allocate memory for "
+ "the receive descriptor ring\n");
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ goto alloc_failed;
+ }
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ return 0;
+alloc_failed:
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ if (!err)
+ continue;
+ hw_dbg(&adapter->hw,
+ "Allocation for Rx Queue %u failed\n", i);
+ break;
+ }
+ return err;
+}
+
+/**
+ * ixgbevf_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ ixgbevf_clean_rx_ring(adapter, rx_ring);
+
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+
+ rx_ring->desc = NULL;
+}
+
+/**
+ * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ if (adapter->rx_ring[i].desc)
+ ixgbevf_free_rx_resources(adapter,
+ &adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbevf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int ixgbevf_open(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__IXGBEVF_TESTING, &adapter->state))
+ return -EBUSY;
+
+ if (hw->adapter_stopped) {
+ ixgbevf_reset(adapter);
+ /* if adapter is still stopped then PF isn't up and
+ * the vf can't start. */
+ if (hw->adapter_stopped) {
+ err = IXGBE_ERR_MBX;
+ printk(KERN_ERR "Unable to start - perhaps the PF"
+ "Driver isn't up yet\n");
+ goto err_setup_reset;
+ }
+ }
+
+ /* allocate transmit descriptors */
+ err = ixgbevf_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = ixgbevf_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ ixgbevf_configure(adapter);
+
+ /*
+ * Map the Tx/Rx rings to the vectors we were allotted.
+ * if request_irq will be called in this function map_rings
+ * must be called *before* up_complete
+ */
+ ixgbevf_map_rings_to_vectors(adapter);
+
+ err = ixgbevf_up_complete(adapter);
+ if (err)
+ goto err_up;
+
+ /* clear any pending interrupts, may auto mask */
+ IXGBE_READ_REG(hw, IXGBE_VTEICR);
+ err = ixgbevf_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ ixgbevf_irq_enable(adapter, true, true);
+
+ return 0;
+
+err_req_irq:
+ ixgbevf_down(adapter);
+err_up:
+ ixgbevf_free_irq(adapter);
+err_setup_rx:
+ ixgbevf_free_all_rx_resources(adapter);
+err_setup_tx:
+ ixgbevf_free_all_tx_resources(adapter);
+ ixgbevf_reset(adapter);
+
+err_setup_reset:
+
+ return err;
+}
+
+/**
+ * ixgbevf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int ixgbevf_close(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ ixgbevf_down(adapter);
+ ixgbevf_free_irq(adapter);
+
+ ixgbevf_free_all_tx_resources(adapter);
+ ixgbevf_free_all_rx_resources(adapter);
+
+ return 0;
+}
+
+static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+ struct ixgbe_adv_tx_context_desc *context_desc;
+ unsigned int i;
+ int err;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ u32 vlan_macip_lens = 0, type_tucmd_mlhl;
+ u32 mss_l4len_idx, l4len;
+
+ if (skb_is_gso(skb)) {
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+ l4len = tcp_hdrlen(skb);
+ *hdr_len += l4len;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ adapter->hw_tso_ctxt++;
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ adapter->hw_tso6_ctxt++;
+ }
+
+ i = tx_ring->next_to_use;
+
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+
+ /* VLAN MACLEN IPLEN */
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ vlan_macip_lens |=
+ (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
+ vlan_macip_lens |= ((skb_network_offset(skb)) <<
+ IXGBE_ADVTXD_MACLEN_SHIFT);
+ *hdr_len += skb_network_offset(skb);
+ vlan_macip_lens |=
+ (skb_transport_header(skb) - skb_network_header(skb));
+ *hdr_len +=
+ (skb_transport_header(skb) - skb_network_header(skb));
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
+ IXGBE_ADVTXD_DTYP_CTXT);
+
+ if (skb->protocol == htons(ETH_P_IP))
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+
+ /* MSS L4LEN IDX */
+ mss_l4len_idx =
+ (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
+ mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
+ /* use index 1 for TSO */
+ mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+ }
+
+ return false;
+}
+
+static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags)
+{
+ struct ixgbe_adv_tx_context_desc *context_desc;
+ unsigned int i;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL ||
+ (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
+ i = tx_ring->next_to_use;
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ vlan_macip_lens |= (tx_flags &
+ IXGBE_TX_FLAGS_VLAN_MASK);
+ vlan_macip_lens |= (skb_network_offset(skb) <<
+ IXGBE_ADVTXD_MACLEN_SHIFT);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ vlan_macip_lens |= (skb_transport_header(skb) -
+ skb_network_header(skb));
+
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+
+ type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
+ IXGBE_ADVTXD_DTYP_CTXT);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ type_tucmd_mlhl |=
+ IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ /* XXX what about other V6 headers?? */
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ type_tucmd_mlhl |=
+ IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ printk(KERN_WARNING
+ "partial checksum but "
+ "proto=%x!\n",
+ skb->protocol);
+ }
+ break;
+ }
+ }
+
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+ /* use index zero for tx checksum offload */
+ context_desc->mss_l4len_idx = 0;
+
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ adapter->hw_csum_tx_good++;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+ }
+
+ return false;
+}
+
+static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags,
+ unsigned int first)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ unsigned int len;
+ unsigned int total = skb->len;
+ unsigned int offset = 0, size, count = 0, i;
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ unsigned int f;
+
+ i = tx_ring->next_to_use;
+
+ len = min(skb_headlen(skb), total);
+ while (len) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+
+ tx_buffer_info->length = size;
+ tx_buffer_info->mapped_as_page = false;
+ tx_buffer_info->dma = pci_map_single(adapter->pdev,
+ skb->data + offset,
+ size, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ goto dma_error;
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ len -= size;
+ total -= size;
+ offset += size;
+ count++;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ len = min((unsigned int)frag->size, total);
+ offset = frag->page_offset;
+
+ while (len) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+
+ tx_buffer_info->length = size;
+ tx_buffer_info->dma = pci_map_page(adapter->pdev,
+ frag->page,
+ offset,
+ size,
+ PCI_DMA_TODEVICE);
+ tx_buffer_info->mapped_as_page = true;
+ if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ goto dma_error;
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ len -= size;
+ total -= size;
+ offset += size;
+ count++;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+ if (total == 0)
+ break;
+ }
+
+ if (i == 0)
+ i = tx_ring->count - 1;
+ else
+ i = i - 1;
+ tx_ring->tx_buffer_info[i].skb = skb;
+ tx_ring->tx_buffer_info[first].next_to_watch = i;
+
+ return count;
+
+dma_error:
+ dev_err(&pdev->dev, "TX DMA map failed\n");
+
+ /* clear timestamp and dma mappings for failed tx_buffer_info map */
+ tx_buffer_info->dma = 0;
+ tx_buffer_info->time_stamp = 0;
+ tx_buffer_info->next_to_watch = 0;
+ count--;
+
+ /* clear timestamp and dma mappings for remaining portion of packet */
+ while (count >= 0) {
+ count--;
+ i--;
+ if (i < 0)
+ i += tx_ring->count;
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ }
+
+ return count;
+}
+
+static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring, int tx_flags,
+ int count, u32 paylen, u8 hdr_len)
+{
+ union ixgbe_adv_tx_desc *tx_desc = NULL;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ u32 olinfo_status = 0, cmd_type_len = 0;
+ unsigned int i;
+
+ u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
+
+ cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
+
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+
+ if (tx_flags & IXGBE_TX_FLAGS_TSO) {
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+
+ olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+ IXGBE_ADVTXD_POPTS_SHIFT;
+
+ /* use index 1 context for tso */
+ olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+ if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+ olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
+ IXGBE_ADVTXD_POPTS_SHIFT;
+
+ } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+ olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+ IXGBE_ADVTXD_POPTS_SHIFT;
+
+ olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+ i = tx_ring->next_to_use;
+ while (count--) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
+ tx_desc->read.cmd_type_len =
+ cpu_to_le32(cmd_type_len | tx_buffer_info->length);
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+ tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ tx_ring->next_to_use = i;
+ writel(i, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
+ struct ixgbevf_ring *tx_ring, int size)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ netif_stop_subqueue(netdev, tx_ring->queue_index);
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it. */
+ smp_mb();
+
+ /* We need to check again in case another CPU has just
+ * made room available. */
+ if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(netdev, tx_ring->queue_index);
+ ++adapter->restart_queue;
+ return 0;
+}
+
+static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
+ struct ixgbevf_ring *tx_ring, int size)
+{
+ if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
+}
+
+static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring;
+ unsigned int first;
+ unsigned int tx_flags = 0;
+ u8 hdr_len = 0;
+ int r_idx = 0, tso;
+ int count = 0;
+
+ unsigned int f;
+
+ tx_ring = &adapter->tx_ring[r_idx];
+
+ if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+ tx_flags |= vlan_tx_tag_get(skb);
+ tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= IXGBE_TX_FLAGS_VLAN;
+ }
+
+ /* a context descriptor is needed for TSO, checksum offload or VLAN tagging */
+ if (skb_is_gso(skb) ||
+ (skb->ip_summed == CHECKSUM_PARTIAL) ||
+ (tx_flags & IXGBE_TX_FLAGS_VLAN))
+ count++;
+
+ count += TXD_USE_COUNT(skb_headlen(skb));
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+
+ if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
+ adapter->tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
+ first = tx_ring->next_to_use;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ tx_flags |= IXGBE_TX_FLAGS_IPV4;
+ tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+ if (tso < 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (tso)
+ tx_flags |= IXGBE_TX_FLAGS_TSO;
+ else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+ (skb->ip_summed == CHECKSUM_PARTIAL))
+ tx_flags |= IXGBE_TX_FLAGS_CSUM;
+
+ ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
+ ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
+ skb->len, hdr_len);
+
+ netdev->trans_start = jiffies;
+
+ ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ixgbevf_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *ixgbevf_get_stats(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ /* only return the current stats */
+ return &adapter->net_stats;
+}
+
+/**
+ * ixgbevf_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbevf_set_mac(struct net_device *netdev, void *p)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+ if (hw->mac.ops.set_rar)
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+
+ return 0;
+}
+
+/**
+ * ixgbevf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+ return -EINVAL;
+
+ hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ /* must set new MTU before calling down or up */
+ netdev->mtu = new_mtu;
+
+ if (netif_running(netdev))
+ ixgbevf_reinit_locked(adapter);
+
+ return 0;
+}
+
+static void ixgbevf_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ ixgbevf_down(adapter);
+ ixgbevf_free_irq(adapter);
+ ixgbevf_free_all_tx_resources(adapter);
+ ixgbevf_free_all_rx_resources(adapter);
+ }
+
+#ifdef CONFIG_PM
+ pci_save_state(pdev);
+#endif
+
+ pci_disable_device(pdev);
+}
+
+static const struct net_device_ops ixgbe_netdev_ops = {
+ .ndo_open = &ixgbevf_open,
+ .ndo_stop = &ixgbevf_close,
+ .ndo_start_xmit = &ixgbevf_xmit_frame,
+ .ndo_get_stats = &ixgbevf_get_stats,
+ .ndo_set_rx_mode = &ixgbevf_set_rx_mode,
+ .ndo_set_multicast_list = &ixgbevf_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = &ixgbevf_set_mac,
+ .ndo_change_mtu = &ixgbevf_change_mtu,
+ .ndo_tx_timeout = &ixgbevf_tx_timeout,
+ .ndo_vlan_rx_register = &ixgbevf_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = &ixgbevf_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = &ixgbevf_vlan_rx_kill_vid,
+};
+
+static void ixgbevf_assign_netdev_ops(struct net_device *dev)
+{
+ struct ixgbevf_adapter *adapter;
+ adapter = netdev_priv(dev);
+ dev->netdev_ops = &ixgbe_netdev_ops;
+ ixgbevf_set_ethtool_ops(dev);
+ dev->watchdog_timeo = 5 * HZ;
+}
+
+/**
+ * ixgbevf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ixgbevf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit ixgbevf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct ixgbevf_adapter *adapter = NULL;
+ struct ixgbe_hw *hw = NULL;
+ const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
+ static int cards_found;
+ int err, pci_using_dac;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ pci_using_dac = 1;
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ err = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA "
+ "configuration, aborting\n");
+ goto err_dma;
+ }
+ }
+ pci_using_dac = 0;
+ }
+
+ err = pci_request_regions(pdev, ixgbevf_driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
+ goto err_pci_reg;
+ }
+
+ pci_set_master(pdev);
+
+#ifdef HAVE_TX_MQ
+ netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
+ MAX_TX_QUEUES);
+#else
+ netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
+#endif
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ hw = &adapter->hw;
+ hw->back = adapter;
+ adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+
+ /*
+ * call save state here in standalone driver because it relies on
+ * adapter struct to exist, and needs to call netdev_priv
+ */
+ pci_save_state(pdev);
+
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->hw_addr) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ ixgbevf_assign_netdev_ops(netdev);
+
+ adapter->bd_number = cards_found;
+
+ /* Setup hw api */
+ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+ hw->mac.type = ii->mac;
+
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, sizeof(hw->mbx.ops));
+
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+ adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
+
+ /* setup the private structure */
+ err = ixgbevf_sw_init(adapter);
+
+ ixgbevf_init_last_counter_stats(adapter);
+
+#ifdef MAX_SKB_FRAGS
+ netdev->features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
+ netdev->features |= NETIF_F_IPV6_CSUM;
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_IP_CSUM;
+ netdev->vlan_features |= NETIF_F_SG;
+
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+#endif /* MAX_SKB_FRAGS */
+
+ /* The HW MAC address was set and/or determined in sw_init */
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ printk(KERN_ERR "invalid MAC address\n");
+ err = -EIO;
+ goto err_sw_init;
+ }
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = &ixgbevf_watchdog;
+ adapter->watchdog_timer.data = (unsigned long)adapter;
+
+ INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
+ INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
+
+ err = ixgbevf_init_interrupt_scheme(adapter);
+ if (err)
+ goto err_sw_init;
+
+ /* pick up the PCI bus settings for reporting later */
+ if (hw->mac.ops.get_bus_info)
+ hw->mac.ops.get_bus_info(hw);
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ strcpy(netdev->name, "eth%d");
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ adapter->netdev_registered = true;
+
+ /* print the MAC address */
+ hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
+ netdev->dev_addr[0],
+ netdev->dev_addr[1],
+ netdev->dev_addr[2],
+ netdev->dev_addr[3],
+ netdev->dev_addr[4],
+ netdev->dev_addr[5]);
+
+ hw_dbg(hw, "MAC: %d\n", hw->mac.type);
+
+ hw_dbg(hw, "LRO is disabled \n");
+
+ hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
+ cards_found++;
+ return 0;
+
+err_register:
+err_sw_init:
+ ixgbevf_reset_interrupt_capability(adapter);
+ iounmap(hw->hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * ixgbevf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ixgbevf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void __devexit ixgbevf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+ del_timer_sync(&adapter->watchdog_timer);
+
+ cancel_work_sync(&adapter->watchdog_task);
+
+ flush_scheduled_work();
+
+ if (adapter->netdev_registered) {
+ unregister_netdev(netdev);
+ adapter->netdev_registered = false;
+ }
+
+ ixgbevf_reset_interrupt_capability(adapter);
+
+ iounmap(adapter->hw.hw_addr);
+ pci_release_regions(pdev);
+
+ hw_dbg(&adapter->hw, "Remove complete\n");
+
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+
+ free_netdev(netdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver ixgbevf_driver = {
+ .name = ixgbevf_driver_name,
+ .id_table = ixgbevf_pci_tbl,
+ .probe = ixgbevf_probe,
+ .remove = __devexit_p(ixgbevf_remove),
+ .shutdown = ixgbevf_shutdown,
+};
+
+/**
+ * ixgbevf_init_module - Driver Registration Routine
+ *
+ * ixgbevf_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init ixgbevf_init_module(void)
+{
+ int ret;
+ printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
+ ixgbevf_driver_version);
+
+ printk(KERN_INFO "%s\n", ixgbevf_copyright);
+
+ ret = pci_register_driver(&ixgbevf_driver);
+ return ret;
+}
+
+module_init(ixgbevf_init_module);
+
+/**
+ * ixgbevf_exit_module - Driver Exit Cleanup Routine
+ *
+ * ixgbevf_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit ixgbevf_exit_module(void)
+{
+ pci_unregister_driver(&ixgbevf_driver);
+}
+
+#ifdef DEBUG
+/**
+ * ixgbevf_get_hw_dev_name - return device name string
+ * used by hardware layer to print debugging information
+ **/
+char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
+{
+ struct ixgbevf_adapter *adapter = hw->back;
+ return adapter->netdev->name;
+}
+
+#endif
+module_exit(ixgbevf_exit_module);
+
+/* ixgbevf_main.c */
diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c
new file mode 100644
index 000000000000..b8143501e6fc
--- /dev/null
+++ b/drivers/net/ixgbevf/mbx.c
@@ -0,0 +1,341 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "mbx.h"
+
+/**
+ * ixgbevf_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if it successfully received a message notification
+ **/
+static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ while (countdown && mbx->ops.check_for_msg(hw)) {
+ countdown--;
+ udelay(mbx->udelay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbevf_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if it successfully received a message acknowledgement
+ **/
+static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ while (countdown && mbx->ops.check_for_ack(hw)) {
+ countdown--;
+ udelay(mbx->udelay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbevf_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ ret_val = ixgbevf_poll_for_msg(hw);
+
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = ixgbevf_poll_for_ack(hw);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_read_v2p_mailbox - read v2p mailbox
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to read the v2p mailbox without losing the read to
+ * clear status bits.
+ **/
+static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
+{
+ u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
+
+ v2p_mailbox |= hw->mbx.v2p_mailbox;
+ hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
+
+ return v2p_mailbox;
+}
+
+/**
+ * ixgbevf_check_for_bit_vf - Determine if a status bit was set
+ * @hw: pointer to the HW structure
+ * @mask: bitmask for bits to be tested and cleared
+ *
+ * This function is used to check for the read to clear bits within
+ * the V2P mailbox.
+ **/
+static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw);
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (v2p_mailbox & mask)
+ ret_val = 0;
+
+ hw->mbx.v2p_mailbox &= ~mask;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_msg_vf - checks to see if the PF has sent mail
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_msg_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
+ ret_val = 0;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set the ACK bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
+ ret_val = 0;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_rst_vf - checks to see if the PF has reset
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set either reset bit, else IXGBE_ERR_MBX
+ **/
+static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
+ IXGBE_VFMAILBOX_RSTI))) {
+ ret_val = 0;
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_obtain_mbx_lock_vf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * return 0 if we obtained the mailbox lock
+ **/
+static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+
+ /* reserve mailbox for vf use */
+ if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
+ ret_val = 0;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_mbx_vf - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied message into the buffer
+ **/
+static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbevf_check_for_msg_vf(hw);
+ ixgbevf_check_for_ack_vf(hw);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* Drop VFU and interrupt the PF to tell it a message has been sent */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+
+out_no_write:
+ return ret_val;
+}
+
+/**
+ * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully read a message from the buffer
+ **/
+static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ s32 ret_val = 0;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+ /* Acknowledge receipt and release mailbox, then we're done */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * ixgbevf_init_mbx_params_vf - set initial values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ /* start mailbox as timed out and let the reset_hw call set the timeout
+ * value to begin communications */
+ mbx->timeout = 0;
+ mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+
+ return 0;
+}
+
+struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
+ .init_params = ixgbevf_init_mbx_params_vf,
+ .read = ixgbevf_read_mbx_vf,
+ .write = ixgbevf_write_mbx_vf,
+ .read_posted = ixgbevf_read_posted_mbx,
+ .write_posted = ixgbevf_write_posted_mbx,
+ .check_for_msg = ixgbevf_check_for_msg_vf,
+ .check_for_ack = ixgbevf_check_for_ack_vf,
+ .check_for_rst = ixgbevf_check_for_rst_vf,
+};
+
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h
new file mode 100644
index 000000000000..1b0e0bf4c0f5
--- /dev/null
+++ b/drivers/net/ixgbevf/mbx.h
@@ -0,0 +1,100 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "vf.h"
+
+#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX -100
+
+#define IXGBE_VFMAILBOX 0x002FC
+#define IXGBE_VFMBMEM 0x00200
+
+/* Define mailbox register bits */
+#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
+#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
+#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
+#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
+#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
+#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
+
+#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is IXGBE_PF_*.
+ * Message ACK's are the value or'd with 0xF0000000
+ */
+#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ * clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET 0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD 3
+
+#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
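+/* With the values above, a posted mailbox operation polls the PF at most
+ * IXGBE_VF_MBX_INIT_TIMEOUT times, waiting IXGBE_VF_MBX_INIT_DELAY
+ * microseconds between polls, i.e. roughly one second before it gives up.
+ */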
+
+/* forward declaration of the HW struct */
+struct ixgbe_hw;
+
+s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *);
+
+#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h
new file mode 100644
index 000000000000..12f75960aec1
--- /dev/null
+++ b/drivers/net/ixgbevf/regs.h
@@ -0,0 +1,85 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_REGS_H_
+#define _IXGBEVF_REGS_H_
+
+#define IXGBE_VFCTRL 0x00000
+#define IXGBE_VFSTATUS 0x00008
+#define IXGBE_VFLINKS 0x00010
+#define IXGBE_VFRTIMER 0x00048
+#define IXGBE_VFRXMEMWRAP 0x03190
+#define IXGBE_VTEICR 0x00100
+#define IXGBE_VTEICS 0x00104
+#define IXGBE_VTEIMS 0x00108
+#define IXGBE_VTEIMC 0x0010C
+#define IXGBE_VTEIAC 0x00110
+#define IXGBE_VTEIAM 0x00114
+#define IXGBE_VTEITR(x) (0x00820 + (4 * x))
+#define IXGBE_VTIVAR(x) (0x00120 + (4 * x))
+#define IXGBE_VTIVAR_MISC 0x00140
+#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x))
+#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x))
+#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x))
+#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x))
+#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x))
+#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x))
+#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x))
+#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x))
+#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x))
+#define IXGBE_VFPSRTYPE 0x00300
+#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x))
+#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x))
+#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x))
+#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x))
+#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x))
+#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x))
+#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x))
+#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x))
+#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x))
+#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x))
+#define IXGBE_VFGPRC 0x0101C
+#define IXGBE_VFGPTC 0x0201C
+#define IXGBE_VFGORC_LSB 0x01020
+#define IXGBE_VFGORC_MSB 0x01024
+#define IXGBE_VFGOTC_LSB 0x02020
+#define IXGBE_VFGOTC_MSB 0x02024
+#define IXGBE_VFMPRC 0x01034
+
+#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+
+#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
+
+#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+ writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
+
+#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
+ readl((a)->hw_addr + (reg) + ((offset) << 2)))
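+/* The array accessors address a register block as consecutive 32-bit words;
+ * mbx.c, for instance, writes one mailbox message word per iteration with
+ * IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]).
+ */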
+
+#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
+
+#endif /* _IXGBEVF_REGS_H_ */
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
new file mode 100644
index 000000000000..4b5dec0ec140
--- /dev/null
+++ b/drivers/net/ixgbevf/vf.c
@@ -0,0 +1,387 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "vf.h"
+
+/**
+ * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by clearing the adapter_stopped flag. The transmit
+ * and receive units are left disabled and uninitialized; their setup is
+ * handled elsewhere in the driver.
+ **/
+static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
+{
+ /* Clear adapter stopped flag */
+ hw->adapter_stopped = false;
+
+ return 0;
+}
+
+/**
+ * ixgbevf_init_hw_vf - virtual function hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting the hardware and then starting
+ * the hardware
+ **/
+static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
+{
+ s32 status = hw->mac.ops.start_hw(hw);
+
+ hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+ return status;
+}
+
+/**
+ * ixgbevf_reset_hw_vf - Performs hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts.
+ **/
+static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 timeout = IXGBE_VF_INIT_TIMEOUT;
+ s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
+ u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ hw->mac.ops.stop_adapter(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* we cannot reset while the RSTI / RSTD bits are asserted */
+ while (!mbx->ops.check_for_rst(hw) && timeout) {
+ timeout--;
+ udelay(5);
+ }
+
+ if (!timeout)
+ return IXGBE_ERR_RESET_FAILED;
+
+ /* mailbox timeout can now become active */
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+
+ msgbuf[0] = IXGBE_VF_RESET;
+ mbx->ops.write_posted(hw, msgbuf, 1);
+
+ msleep(10);
+
+ /* set our "perm_addr" based on info provided by PF */
+ /* also set up the mc_filter_type which is piggy backed
+ * on the mac address in word 3 */
+ ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
+ if (ret_val)
+ return ret_val;
+
+ if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
+ return IXGBE_ERR_INVALID_MAC_ADDR;
+
+ memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
+
+ return 0;
+}
+
+/**
+ * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
+{
+ u32 number_of_queues;
+ u32 reg_val;
+ u16 i;
+
+ /*
+ * Set the adapter_stopped flag so other driver functions stop touching
+ * the hardware
+ */
+ hw->adapter_stopped = true;
+
+ /* Disable the receive unit by stopping each queue */
+ number_of_queues = hw->mac.max_rx_queues;
+ for (i = 0; i < number_of_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ if (reg_val & IXGBE_RXDCTL_ENABLE) {
+ reg_val &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
+ }
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Clear interrupt mask to stop interrupts from being generated */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+ /* Clear any pending interrupts */
+ IXGBE_READ_REG(hw, IXGBE_VTEICR);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ number_of_queues = hw->mac.max_tx_queues;
+ for (i = 0; i < number_of_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ if (reg_val & IXGBE_TXDCTL_ENABLE) {
+ reg_val &= ~IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbevf_mta_vector - Determines bit-vector in multicast table to set
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts 12 bits from a multicast address to determine which bit-vector
+ * to set in the multicast table. The hardware uses 12 bits from incoming rx
+ * multicast addresses to determine the bit-vector to check in the MTA.
+ * Which of the 4 combinations of 12 bits the hardware uses is selected by
+ * the MO field of the MCSTCTRL register; the MO field is set to
+ * mc_filter_type during initialization.
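+ * For example, with filter type 0 the multicast address 01:00:5E:00:00:01
+ * hashes to ((0x00 >> 4) | (0x01 << 4)) = 0x010.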
+ **/
+static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector = 0;
+
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ break;
+ }
+
+ /* vector can only be 12-bits or boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
+
+/**
+ * ixgbevf_get_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to storage for retrieved MAC address
+ **/
+static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+ memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+
+ return 0;
+}
+
+/**
+ * ixgbevf_set_rar_vf - set device MAC address
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: Unused in this implementation
+ **/
+static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
+ u32 vmdq)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[3];
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+
+ memset(msgbuf, 0, sizeof(msgbuf));
+ msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
+ memcpy(msg_addr, addr, 6);
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /* if nacked the address was rejected, use "perm_addr" */
+ if (!ret_val &&
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
+ ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ * @next: caller supplied function to return next address in list
+ *
+ * Updates the Multicast Table Array.
+ **/
+static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count,
+ ixgbe_mc_addr_itr next)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+ u16 *vector_list = (u16 *)&msgbuf[1];
+ u32 vector;
+ u32 cnt, i;
+ u32 vmdq;
+
+ /* Each entry in the list uses 1 16 bit word. We have 30
+ * 16 bit words available in our HW msg buffer (minus 1 for the
+ * msg type). That's 30 hash values if we pack 'em right. If
+ * there are more than 30 MC addresses to add then punt the
+ * extras for now and then add code to handle more than 30 later.
+ * It would be unusual for a server to request that many multicast
+ * addresses except in large enterprise network environments.
+ */
+
+ cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
+ msgbuf[0] = IXGBE_VF_SET_MULTICAST;
+ msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
+
+ for (i = 0; i < cnt; i++) {
+ vector = ixgbevf_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
+ vector_list[i] = vector;
+ }
+
+ mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
+
+ return 0;
+}
+
+/**
+ * ixgbevf_set_vfta_vf - Set/Unset vlan filter table address
+ * @hw: pointer to the HW structure
+ * @vlan: 12 bit VLAN ID
+ * @vind: unused by VF drivers
+ * @vlan_on: if true then set bit, else clear bit
+ **/
+static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[2];
+
+ msgbuf[0] = IXGBE_VF_SET_VLAN;
+ msgbuf[1] = vlan;
+ /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+ msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
+
+ return mbx->ops.write_posted(hw, msgbuf, 2);
+}
+
+/**
+ * ixgbevf_setup_mac_link_vf - Setup MAC link settings
+ * @hw: pointer to hardware structure
+ * @speed: Unused in this implementation
+ * @autoneg: Unused in this implementation
+ * @autoneg_wait_to_complete: Unused in this implementation
+ *
+ * Do nothing and return success. VF drivers are not allowed to change
+ * global settings. Maintained for driver compatibility.
+ **/
+static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ return 0;
+}
+
+/**
+ * ixgbevf_check_mac_link_vf - Get link/speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up,
+ bool autoneg_wait_to_complete)
+{
+ u32 links_reg;
+
+ if (!(hw->mbx.ops.check_for_rst(hw))) {
+ *link_up = false;
+ *speed = 0;
+ return -1;
+ }
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = true;
+ else
+ *link_up = false;
+
+ if (links_reg & IXGBE_LINKS_SPEED)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ return 0;
+}
+
+struct ixgbe_mac_operations ixgbevf_mac_ops = {
+ .init_hw = ixgbevf_init_hw_vf,
+ .reset_hw = ixgbevf_reset_hw_vf,
+ .start_hw = ixgbevf_start_hw_vf,
+ .get_mac_addr = ixgbevf_get_mac_addr_vf,
+ .stop_adapter = ixgbevf_stop_hw_vf,
+ .setup_link = ixgbevf_setup_mac_link_vf,
+ .check_link = ixgbevf_check_mac_link_vf,
+ .set_rar = ixgbevf_set_rar_vf,
+ .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
+ .set_vfta = ixgbevf_set_vfta_vf,
+};
+
+struct ixgbevf_info ixgbevf_vf_info = {
+ .mac = ixgbe_mac_82599_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
+
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
new file mode 100644
index 000000000000..799600e92700
--- /dev/null
+++ b/drivers/net/ixgbevf/vf.h
@@ -0,0 +1,168 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef __IXGBE_VF_H__
+#define __IXGBE_VF_H__
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+
+#include "defines.h"
+#include "regs.h"
+#include "mbx.h"
+
+struct ixgbe_hw;
+
+/* iterator type for walking multicast address lists */
+typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+ u32 *vmdq);
+struct ixgbe_mac_operations {
+ s32 (*init_hw)(struct ixgbe_hw *);
+ s32 (*reset_hw)(struct ixgbe_hw *);
+ s32 (*start_hw)(struct ixgbe_hw *);
+ s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+ enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+ u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
+ s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+ s32 (*stop_adapter)(struct ixgbe_hw *);
+ s32 (*get_bus_info)(struct ixgbe_hw *);
+
+ /* Link */
+ s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
+ s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+ s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+ bool *);
+
+ /* RAR, Multicast, VLAN */
+ s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
+ s32 (*init_rx_addrs)(struct ixgbe_hw *);
+ s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+ ixgbe_mc_addr_itr);
+ s32 (*enable_mc)(struct ixgbe_hw *);
+ s32 (*disable_mc)(struct ixgbe_hw *);
+ s32 (*clear_vfta)(struct ixgbe_hw *);
+ s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+};
+
+enum ixgbe_mac_type {
+ ixgbe_mac_unknown = 0,
+ ixgbe_mac_82599_vf,
+ ixgbe_num_macs
+};
+
+struct ixgbe_mac_info {
+ struct ixgbe_mac_operations ops;
+ u8 addr[6];
+ u8 perm_addr[6];
+
+ enum ixgbe_mac_type type;
+
+ s32 mc_filter_type;
+
+ bool get_link_status;
+ u32 max_tx_queues;
+ u32 max_rx_queues;
+ u32 max_msix_vectors;
+};
+
+struct ixgbe_mbx_operations {
+ s32 (*init_params)(struct ixgbe_hw *hw);
+ s32 (*read)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*write)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*check_for_msg)(struct ixgbe_hw *);
+ s32 (*check_for_ack)(struct ixgbe_hw *);
+ s32 (*check_for_rst)(struct ixgbe_hw *);
+};
+
+struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+ struct ixgbe_mbx_operations ops;
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 udelay;
+ u32 v2p_mailbox;
+ u16 size;
+};
+
+struct ixgbe_hw {
+ void *back;
+
+ u8 __iomem *hw_addr;
+ u8 *flash_address;
+ unsigned long io_base;
+
+ struct ixgbe_mac_info mac;
+ struct ixgbe_mbx_info mbx;
+
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 vendor_id;
+
+ u8 revision_id;
+ bool adapter_stopped;
+};
+
+struct ixgbevf_hw_stats {
+ u64 base_vfgprc;
+ u64 base_vfgptc;
+ u64 base_vfgorc;
+ u64 base_vfgotc;
+ u64 base_vfmprc;
+
+ u64 last_vfgprc;
+ u64 last_vfgptc;
+ u64 last_vfgorc;
+ u64 last_vfgotc;
+ u64 last_vfmprc;
+
+ u64 vfgprc;
+ u64 vfgptc;
+ u64 vfgorc;
+ u64 vfgotc;
+ u64 vfmprc;
+};
+
+struct ixgbevf_info {
+ enum ixgbe_mac_type mac;
+ struct ixgbe_mac_operations *mac_ops;
+};
+
+#endif /* __IXGBE_VF_H__ */
+
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 792b88fc3574..558b6a0b15fc 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -288,7 +288,7 @@ jme_set_rx_pcc(struct jme_adapter *jme, int p)
wmb();
if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
- msg_rx_status(jme, "Switched to PCC_P%d\n", p);
+ netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p);
}
static void
@@ -483,13 +483,13 @@ jme_check_link(struct net_device *netdev, int testonly)
strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
"MDI-X" :
"MDI");
- msg_link(jme, "Link is up at %s.\n", linkmsg);
+ netif_info(jme, link, jme->dev, "Link is up at %s.\n", linkmsg);
netif_carrier_on(netdev);
} else {
if (testonly)
goto out;
- msg_link(jme, "Link is down.\n");
+ netif_info(jme, link, jme->dev, "Link is down.\n");
jme->phylink = 0;
netif_carrier_off(netdev);
}
@@ -883,20 +883,20 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
== RXWBFLAG_TCPON)) {
if (flags & RXWBFLAG_IPV4)
- msg_rx_err(jme, "TCP Checksum error\n");
+ netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n");
return false;
}
if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
== RXWBFLAG_UDPON)) {
if (flags & RXWBFLAG_IPV4)
- msg_rx_err(jme, "UDP Checksum error.\n");
+ netif_err(jme, rx_err, jme->dev, "UDP Checksum error.\n");
return false;
}
if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
== RXWBFLAG_IPV4)) {
- msg_rx_err(jme, "IPv4 Checksum error.\n");
+ netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error.\n");
return false;
}
@@ -1186,9 +1186,9 @@ jme_link_change_tasklet(unsigned long arg)
while (!atomic_dec_and_test(&jme->link_changing)) {
atomic_inc(&jme->link_changing);
- msg_intr(jme, "Get link change lock failed.\n");
+ netif_info(jme, intr, jme->dev, "Get link change lock failed.\n");
while (atomic_read(&jme->link_changing) != 1)
- msg_intr(jme, "Waiting link change lock.\n");
+ netif_info(jme, intr, jme->dev, "Waiting link change lock.\n");
}
if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
@@ -1305,7 +1305,7 @@ jme_rx_empty_tasklet(unsigned long arg)
if (unlikely(!netif_carrier_ok(jme->dev)))
return;
- msg_rx_status(jme, "RX Queue Full!\n");
+ netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");
jme_rx_clean_tasklet(arg);
@@ -1325,7 +1325,7 @@ jme_wake_queue_if_stopped(struct jme_adapter *jme)
smp_wmb();
if (unlikely(netif_queue_stopped(jme->dev) &&
atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
- msg_tx_done(jme, "TX Queue Waked.\n");
+ netif_info(jme, tx_done, jme->dev, "TX Queue Waked.\n");
netif_wake_queue(jme->dev);
}
@@ -1835,7 +1835,7 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
*flags |= TXFLAG_UDPCS;
break;
default:
- msg_tx_err(jme, "Error upper layer protocol.\n");
+ netif_err(jme, tx_err, jme->dev, "Error upper layer protocol.\n");
break;
}
}
@@ -1910,12 +1910,12 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
smp_wmb();
if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
netif_stop_queue(jme->dev);
- msg_tx_queued(jme, "TX Queue Paused.\n");
+ netif_info(jme, tx_queued, jme->dev, "TX Queue Paused.\n");
smp_wmb();
if (atomic_read(&txring->nr_free)
>= (jme->tx_wake_threshold)) {
netif_wake_queue(jme->dev);
- msg_tx_queued(jme, "TX Queue Fast Waked.\n");
+ netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked.\n");
}
}
@@ -1923,7 +1923,7 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
(jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
txbi->skb)) {
netif_stop_queue(jme->dev);
- msg_tx_queued(jme, "TX Queue Stopped %d@%lu.\n", idx, jiffies);
+ netif_info(jme, tx_queued, jme->dev, "TX Queue Stopped %d@%lu.\n", idx, jiffies);
}
}
@@ -1946,7 +1946,7 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (unlikely(idx < 0)) {
netif_stop_queue(netdev);
- msg_tx_err(jme, "BUG! Tx ring full when queue awake!\n");
+ netif_err(jme, tx_err, jme->dev, "BUG! Tx ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@@ -2013,7 +2013,7 @@ jme_set_multi(struct net_device *netdev)
jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
for (i = 0, mclist = netdev->mc_list;
- mclist && i < netdev->mc_count;
+ mclist && i < netdev_mc_count(netdev);
++i, mclist = mclist->next) {
bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
@@ -2473,7 +2473,7 @@ jme_smb_read(struct jme_adapter *jme, unsigned int addr)
val = jread32(jme, JME_SMBCSR);
}
if (!to) {
- msg_hw(jme, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
return 0xFF;
}
@@ -2489,7 +2489,7 @@ jme_smb_read(struct jme_adapter *jme, unsigned int addr)
val = jread32(jme, JME_SMBINTF);
}
if (!to) {
- msg_hw(jme, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
return 0xFF;
}
@@ -2509,7 +2509,7 @@ jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
val = jread32(jme, JME_SMBCSR);
}
if (!to) {
- msg_hw(jme, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
return;
}
@@ -2526,7 +2526,7 @@ jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
val = jread32(jme, JME_SMBINTF);
}
if (!to) {
- msg_hw(jme, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
return;
}
@@ -2876,14 +2876,14 @@ jme_init_one(struct pci_dev *pdev,
goto err_out_unmap;
}
- msg_probe(jme, "%s%s ver:%x rev:%x macaddr:%pM\n",
- (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
- "JMC250 Gigabit Ethernet" :
- (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
- "JMC260 Fast Ethernet" : "Unknown",
- (jme->fpgaver != 0) ? " (FPGA)" : "",
- (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
- jme->rev, netdev->dev_addr);
+ netif_info(jme, probe, jme->dev, "%s%s ver:%x rev:%x macaddr:%pM\n",
+ (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
+ "JMC250 Gigabit Ethernet" :
+ (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
+ "JMC260 Fast Ethernet" : "Unknown",
+ (jme->fpgaver != 0) ? " (FPGA)" : "",
+ (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
+ jme->rev, netdev->dev_addr);
return 0;
@@ -2994,7 +2994,7 @@ jme_resume(struct pci_dev *pdev)
}
#endif
-static struct pci_device_id jme_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = {
{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
{ }
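Editor's note: the ID table now uses the DEFINE_PCI_DEVICE_TABLE() wrapper, which at the time of this change is roughly the following (paraphrased from <linux/pci.h>), so the table picks up the const and __devinitconst annotations consistently across drivers:

        #define DEFINE_PCI_DEVICE_TABLE(_table) \
                const struct pci_device_id _table[] __devinitconst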
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index 251abed3817e..c19db9146a2f 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -45,43 +45,16 @@
printk(KERN_ERR PFX fmt, ## args)
#ifdef TX_DEBUG
-#define tx_dbg(priv, fmt, args...) \
- printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ## args)
+#define tx_dbg(priv, fmt, args...) \
+ printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ##args)
#else
-#define tx_dbg(priv, fmt, args...)
+#define tx_dbg(priv, fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ##args); \
+} while (0)
#endif
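Editor's note: the reworked no-op tx_dbg() keeps the printk() inside an if (0) so the compiler still type-checks the format string and its arguments when TX_DEBUG is off, while generating no code. A minimal userspace sketch of the same idiom (the dbg() name and DEBUG_ON flag are illustrative only):

        #include <stdio.h>

        #define DEBUG_ON 0

        /* Arguments are always checked against the format string; the call
         * is optimized away entirely when DEBUG_ON is 0. */
        #define dbg(fmt, args...) \
        do { \
                if (DEBUG_ON) \
                        printf(fmt, ##args); \
        } while (0)

        int main(void)
        {
                dbg("queued %d packets\n", 3);  /* compiles to nothing here */
                return 0;
        }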
-#define jme_msg(msglvl, type, priv, fmt, args...) \
- if (netif_msg_##type(priv)) \
- printk(msglvl "%s: " fmt, (priv)->dev->name, ## args)
-
-#define msg_probe(priv, fmt, args...) \
- jme_msg(KERN_INFO, probe, priv, fmt, ## args)
-
-#define msg_link(priv, fmt, args...) \
- jme_msg(KERN_INFO, link, priv, fmt, ## args)
-
-#define msg_intr(priv, fmt, args...) \
- jme_msg(KERN_INFO, intr, priv, fmt, ## args)
-
-#define msg_rx_err(priv, fmt, args...) \
- jme_msg(KERN_ERR, rx_err, priv, fmt, ## args)
-
-#define msg_rx_status(priv, fmt, args...) \
- jme_msg(KERN_INFO, rx_status, priv, fmt, ## args)
-
-#define msg_tx_err(priv, fmt, args...) \
- jme_msg(KERN_ERR, tx_err, priv, fmt, ## args)
-
-#define msg_tx_done(priv, fmt, args...) \
- jme_msg(KERN_INFO, tx_done, priv, fmt, ## args)
-
-#define msg_tx_queued(priv, fmt, args...) \
- jme_msg(KERN_INFO, tx_queued, priv, fmt, ## args)
-
-#define msg_hw(priv, fmt, args...) \
- jme_msg(KERN_ERR, hw, priv, fmt, ## args)
-
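Editor's note: the removed msg_*() wrappers are superseded by the generic netif_err()/netif_info() helpers from <linux/netdevice.h>, which perform the same netif_msg_##type(priv) test before printing. A simplified userspace model of that behaviour (demo_netif_info() and the hard-coded device name are illustrative, not the exact kernel definitions; the NETIF_MSG_TX_DONE value mirrors the kernel's enum):

        #include <stdio.h>

        struct priv { unsigned int msg_enable; };

        #define NETIF_MSG_TX_DONE 0x0400
        #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)

        /* Mirrors what netif_info(priv, tx_done, dev, ...) does: test the
         * per-driver msg_enable bit, then print prefixed with the device name. */
        #define demo_netif_info(p, type, name, fmt, args...) \
        do { \
                if (netif_msg_##type(p)) \
                        printf("%s: " fmt, name, ##args); \
        } while (0)

        int main(void)
        {
                struct priv jme = { .msg_enable = NETIF_MSG_TX_DONE };

                demo_netif_info(&jme, tx_done, "eth0", "TX Queue Waked.\n");
                return 0;
        }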
/*
* Extra PCI Configuration space interface
*/
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 25e2af6997e4..af0c764130e6 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -490,19 +490,19 @@ static void korina_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
recognise |= ETH_ARC_PRO;
- else if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 4))
+ else if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 4))
/* All multicast and broadcast */
recognise |= ETH_ARC_AM;
/* Build the hash table */
- if (dev->mc_count > 4) {
+ if (netdev_mc_count(dev) > 4) {
u16 hash_table[4];
u32 crc;
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
char *addrs = dmi->dmi_addr;
dmi = dmi->next;
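Editor's note: these conversions replace direct dev->mc_count reads with the netdev_mc_count()/netdev_mc_empty() accessors so drivers stop depending on the multicast list's internal layout. At this point in time the helpers are thin wrappers, roughly (paraphrased from <linux/netdevice.h>):

        #define netdev_mc_count(dev)    ((dev)->mc_count)
        #define netdev_mc_empty(dev)    (netdev_mc_count(dev) == 0)

so behaviour is unchanged while the underlying list implementation is free to change later.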
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 6d3ac65bc35c..9845ab1e5573 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -965,14 +965,14 @@ static void ks8851_set_rx_mode(struct net_device *dev)
rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE |
RXCR1_RXPAFMA | RXCR1_RXMAFMA);
- } else if (dev->flags & IFF_MULTICAST && dev->mc_count > 0) {
+ } else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) {
struct dev_mc_list *mcptr = dev->mc_list;
u32 crc;
int i;
/* accept some multicast */
- for (i = dev->mc_count; i > 0; i--) {
+ for (i = netdev_mc_count(dev); i > 0; i--) {
crc = ether_crc(ETH_ALEN, mcptr->dmi_addr);
crc >>= (32 - 6); /* get top six bits */
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
index c0ceebccaa49..ffffb3889704 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -1193,8 +1193,8 @@ static void ks_set_rx_mode(struct net_device *netdev)
else
ks_set_promis(ks, false);
- if ((netdev->flags & IFF_MULTICAST) && netdev->mc_count) {
- if (netdev->mc_count <= MAX_MCAST_LST) {
+ if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
+ if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
int i = 0;
for (ptr = netdev->mc_list; ptr; ptr = ptr->next) {
if (!(*ptr->dmi_addr & 1))
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
new file mode 100644
index 000000000000..6f187c7e61fa
--- /dev/null
+++ b/drivers/net/ksz884x.c
@@ -0,0 +1,7335 @@
+/**
+ * drivers/net/ksz884x.c - Micrel KSZ8841/2 PCI Ethernet driver
+ *
+ * Copyright (c) 2009-2010 Micrel, Inc.
+ * Tristram Ha <Tristram.Ha@micrel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/mii.h>
+#include <linux/platform_device.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/sched.h>
+
+
+/* DMA Registers */
+
+#define KS_DMA_TX_CTRL 0x0000
+#define DMA_TX_ENABLE 0x00000001
+#define DMA_TX_CRC_ENABLE 0x00000002
+#define DMA_TX_PAD_ENABLE 0x00000004
+#define DMA_TX_LOOPBACK 0x00000100
+#define DMA_TX_FLOW_ENABLE 0x00000200
+#define DMA_TX_CSUM_IP 0x00010000
+#define DMA_TX_CSUM_TCP 0x00020000
+#define DMA_TX_CSUM_UDP 0x00040000
+#define DMA_TX_BURST_SIZE 0x3F000000
+
+#define KS_DMA_RX_CTRL 0x0004
+#define DMA_RX_ENABLE 0x00000001
+#define KS884X_DMA_RX_MULTICAST 0x00000002
+#define DMA_RX_PROMISCUOUS 0x00000004
+#define DMA_RX_ERROR 0x00000008
+#define DMA_RX_UNICAST 0x00000010
+#define DMA_RX_ALL_MULTICAST 0x00000020
+#define DMA_RX_BROADCAST 0x00000040
+#define DMA_RX_FLOW_ENABLE 0x00000200
+#define DMA_RX_CSUM_IP 0x00010000
+#define DMA_RX_CSUM_TCP 0x00020000
+#define DMA_RX_CSUM_UDP 0x00040000
+#define DMA_RX_BURST_SIZE 0x3F000000
+
+#define DMA_BURST_SHIFT 24
+#define DMA_BURST_DEFAULT 8
+
+#define KS_DMA_TX_START 0x0008
+#define KS_DMA_RX_START 0x000C
+#define DMA_START 0x00000001
+
+#define KS_DMA_TX_ADDR 0x0010
+#define KS_DMA_RX_ADDR 0x0014
+
+#define DMA_ADDR_LIST_MASK 0xFFFFFFFC
+#define DMA_ADDR_LIST_SHIFT 2
+
+/* MTR0 */
+#define KS884X_MULTICAST_0_OFFSET 0x0020
+#define KS884X_MULTICAST_1_OFFSET 0x0021
+#define KS884X_MULTICAST_2_OFFSET 0x0022
+#define KS884x_MULTICAST_3_OFFSET 0x0023
+/* MTR1 */
+#define KS884X_MULTICAST_4_OFFSET 0x0024
+#define KS884X_MULTICAST_5_OFFSET 0x0025
+#define KS884X_MULTICAST_6_OFFSET 0x0026
+#define KS884X_MULTICAST_7_OFFSET 0x0027
+
+/* Interrupt Registers */
+
+/* INTEN */
+#define KS884X_INTERRUPTS_ENABLE 0x0028
+/* INTST */
+#define KS884X_INTERRUPTS_STATUS 0x002C
+
+#define KS884X_INT_RX_STOPPED 0x02000000
+#define KS884X_INT_TX_STOPPED 0x04000000
+#define KS884X_INT_RX_OVERRUN 0x08000000
+#define KS884X_INT_TX_EMPTY 0x10000000
+#define KS884X_INT_RX 0x20000000
+#define KS884X_INT_TX 0x40000000
+#define KS884X_INT_PHY 0x80000000
+
+#define KS884X_INT_RX_MASK \
+ (KS884X_INT_RX | KS884X_INT_RX_OVERRUN)
+#define KS884X_INT_TX_MASK \
+ (KS884X_INT_TX | KS884X_INT_TX_EMPTY)
+#define KS884X_INT_MASK (KS884X_INT_RX | KS884X_INT_TX | KS884X_INT_PHY)
+
+/* MAC Additional Station Address */
+
+/* MAAL0 */
+#define KS_ADD_ADDR_0_LO 0x0080
+/* MAAH0 */
+#define KS_ADD_ADDR_0_HI 0x0084
+/* MAAL1 */
+#define KS_ADD_ADDR_1_LO 0x0088
+/* MAAH1 */
+#define KS_ADD_ADDR_1_HI 0x008C
+/* MAAL2 */
+#define KS_ADD_ADDR_2_LO 0x0090
+/* MAAH2 */
+#define KS_ADD_ADDR_2_HI 0x0094
+/* MAAL3 */
+#define KS_ADD_ADDR_3_LO 0x0098
+/* MAAH3 */
+#define KS_ADD_ADDR_3_HI 0x009C
+/* MAAL4 */
+#define KS_ADD_ADDR_4_LO 0x00A0
+/* MAAH4 */
+#define KS_ADD_ADDR_4_HI 0x00A4
+/* MAAL5 */
+#define KS_ADD_ADDR_5_LO 0x00A8
+/* MAAH5 */
+#define KS_ADD_ADDR_5_HI 0x00AC
+/* MAAL6 */
+#define KS_ADD_ADDR_6_LO 0x00B0
+/* MAAH6 */
+#define KS_ADD_ADDR_6_HI 0x00B4
+/* MAAL7 */
+#define KS_ADD_ADDR_7_LO 0x00B8
+/* MAAH7 */
+#define KS_ADD_ADDR_7_HI 0x00BC
+/* MAAL8 */
+#define KS_ADD_ADDR_8_LO 0x00C0
+/* MAAH8 */
+#define KS_ADD_ADDR_8_HI 0x00C4
+/* MAAL9 */
+#define KS_ADD_ADDR_9_LO 0x00C8
+/* MAAH9 */
+#define KS_ADD_ADDR_9_HI 0x00CC
+/* MAAL10 */
+#define KS_ADD_ADDR_A_LO 0x00D0
+/* MAAH10 */
+#define KS_ADD_ADDR_A_HI 0x00D4
+/* MAAL11 */
+#define KS_ADD_ADDR_B_LO 0x00D8
+/* MAAH11 */
+#define KS_ADD_ADDR_B_HI 0x00DC
+/* MAAL12 */
+#define KS_ADD_ADDR_C_LO 0x00E0
+/* MAAH12 */
+#define KS_ADD_ADDR_C_HI 0x00E4
+/* MAAL13 */
+#define KS_ADD_ADDR_D_LO 0x00E8
+/* MAAH13 */
+#define KS_ADD_ADDR_D_HI 0x00EC
+/* MAAL14 */
+#define KS_ADD_ADDR_E_LO 0x00F0
+/* MAAH14 */
+#define KS_ADD_ADDR_E_HI 0x00F4
+/* MAAL15 */
+#define KS_ADD_ADDR_F_LO 0x00F8
+/* MAAH15 */
+#define KS_ADD_ADDR_F_HI 0x00FC
+
+#define ADD_ADDR_HI_MASK 0x0000FFFF
+#define ADD_ADDR_ENABLE 0x80000000
+#define ADD_ADDR_INCR 8
+
+/* Miscellaneous Registers */
+
+/* MARL */
+#define KS884X_ADDR_0_OFFSET 0x0200
+#define KS884X_ADDR_1_OFFSET 0x0201
+/* MARM */
+#define KS884X_ADDR_2_OFFSET 0x0202
+#define KS884X_ADDR_3_OFFSET 0x0203
+/* MARH */
+#define KS884X_ADDR_4_OFFSET 0x0204
+#define KS884X_ADDR_5_OFFSET 0x0205
+
+/* OBCR */
+#define KS884X_BUS_CTRL_OFFSET 0x0210
+
+#define BUS_SPEED_125_MHZ 0x0000
+#define BUS_SPEED_62_5_MHZ 0x0001
+#define BUS_SPEED_41_66_MHZ 0x0002
+#define BUS_SPEED_25_MHZ 0x0003
+
+/* EEPCR */
+#define KS884X_EEPROM_CTRL_OFFSET 0x0212
+
+#define EEPROM_CHIP_SELECT 0x0001
+#define EEPROM_SERIAL_CLOCK 0x0002
+#define EEPROM_DATA_OUT 0x0004
+#define EEPROM_DATA_IN 0x0008
+#define EEPROM_ACCESS_ENABLE 0x0010
+
+/* MBIR */
+#define KS884X_MEM_INFO_OFFSET 0x0214
+
+#define RX_MEM_TEST_FAILED 0x0008
+#define RX_MEM_TEST_FINISHED 0x0010
+#define TX_MEM_TEST_FAILED 0x0800
+#define TX_MEM_TEST_FINISHED 0x1000
+
+/* GCR */
+#define KS884X_GLOBAL_CTRL_OFFSET 0x0216
+#define GLOBAL_SOFTWARE_RESET 0x0001
+
+#define KS8841_POWER_MANAGE_OFFSET 0x0218
+
+/* WFCR */
+#define KS8841_WOL_CTRL_OFFSET 0x021A
+#define KS8841_WOL_MAGIC_ENABLE 0x0080
+#define KS8841_WOL_FRAME3_ENABLE 0x0008
+#define KS8841_WOL_FRAME2_ENABLE 0x0004
+#define KS8841_WOL_FRAME1_ENABLE 0x0002
+#define KS8841_WOL_FRAME0_ENABLE 0x0001
+
+/* WF0 */
+#define KS8841_WOL_FRAME_CRC_OFFSET 0x0220
+#define KS8841_WOL_FRAME_BYTE0_OFFSET 0x0224
+#define KS8841_WOL_FRAME_BYTE2_OFFSET 0x0228
+
+/* IACR */
+#define KS884X_IACR_P 0x04A0
+#define KS884X_IACR_OFFSET KS884X_IACR_P
+
+/* IADR1 */
+#define KS884X_IADR1_P 0x04A2
+#define KS884X_IADR2_P 0x04A4
+#define KS884X_IADR3_P 0x04A6
+#define KS884X_IADR4_P 0x04A8
+#define KS884X_IADR5_P 0x04AA
+
+#define KS884X_ACC_CTRL_SEL_OFFSET KS884X_IACR_P
+#define KS884X_ACC_CTRL_INDEX_OFFSET (KS884X_ACC_CTRL_SEL_OFFSET + 1)
+
+#define KS884X_ACC_DATA_0_OFFSET KS884X_IADR4_P
+#define KS884X_ACC_DATA_1_OFFSET (KS884X_ACC_DATA_0_OFFSET + 1)
+#define KS884X_ACC_DATA_2_OFFSET KS884X_IADR5_P
+#define KS884X_ACC_DATA_3_OFFSET (KS884X_ACC_DATA_2_OFFSET + 1)
+#define KS884X_ACC_DATA_4_OFFSET KS884X_IADR2_P
+#define KS884X_ACC_DATA_5_OFFSET (KS884X_ACC_DATA_4_OFFSET + 1)
+#define KS884X_ACC_DATA_6_OFFSET KS884X_IADR3_P
+#define KS884X_ACC_DATA_7_OFFSET (KS884X_ACC_DATA_6_OFFSET + 1)
+#define KS884X_ACC_DATA_8_OFFSET KS884X_IADR1_P
+
+/* P1MBCR */
+#define KS884X_P1MBCR_P 0x04D0
+#define KS884X_P1MBSR_P 0x04D2
+#define KS884X_PHY1ILR_P 0x04D4
+#define KS884X_PHY1IHR_P 0x04D6
+#define KS884X_P1ANAR_P 0x04D8
+#define KS884X_P1ANLPR_P 0x04DA
+
+/* P2MBCR */
+#define KS884X_P2MBCR_P 0x04E0
+#define KS884X_P2MBSR_P 0x04E2
+#define KS884X_PHY2ILR_P 0x04E4
+#define KS884X_PHY2IHR_P 0x04E6
+#define KS884X_P2ANAR_P 0x04E8
+#define KS884X_P2ANLPR_P 0x04EA
+
+#define KS884X_PHY_1_CTRL_OFFSET KS884X_P1MBCR_P
+#define PHY_CTRL_INTERVAL (KS884X_P2MBCR_P - KS884X_P1MBCR_P)
+
+#define KS884X_PHY_CTRL_OFFSET 0x00
+
+/* Mode Control Register */
+#define PHY_REG_CTRL 0
+
+#define PHY_RESET 0x8000
+#define PHY_LOOPBACK 0x4000
+#define PHY_SPEED_100MBIT 0x2000
+#define PHY_AUTO_NEG_ENABLE 0x1000
+#define PHY_POWER_DOWN 0x0800
+#define PHY_MII_DISABLE 0x0400
+#define PHY_AUTO_NEG_RESTART 0x0200
+#define PHY_FULL_DUPLEX 0x0100
+#define PHY_COLLISION_TEST 0x0080
+#define PHY_HP_MDIX 0x0020
+#define PHY_FORCE_MDIX 0x0010
+#define PHY_AUTO_MDIX_DISABLE 0x0008
+#define PHY_REMOTE_FAULT_DISABLE 0x0004
+#define PHY_TRANSMIT_DISABLE 0x0002
+#define PHY_LED_DISABLE 0x0001
+
+#define KS884X_PHY_STATUS_OFFSET 0x02
+
+/* Mode Status Register */
+#define PHY_REG_STATUS 1
+
+#define PHY_100BT4_CAPABLE 0x8000
+#define PHY_100BTX_FD_CAPABLE 0x4000
+#define PHY_100BTX_CAPABLE 0x2000
+#define PHY_10BT_FD_CAPABLE 0x1000
+#define PHY_10BT_CAPABLE 0x0800
+#define PHY_MII_SUPPRESS_CAPABLE 0x0040
+#define PHY_AUTO_NEG_ACKNOWLEDGE 0x0020
+#define PHY_REMOTE_FAULT 0x0010
+#define PHY_AUTO_NEG_CAPABLE 0x0008
+#define PHY_LINK_STATUS 0x0004
+#define PHY_JABBER_DETECT 0x0002
+#define PHY_EXTENDED_CAPABILITY 0x0001
+
+#define KS884X_PHY_ID_1_OFFSET 0x04
+#define KS884X_PHY_ID_2_OFFSET 0x06
+
+/* PHY Identifier Registers */
+#define PHY_REG_ID_1 2
+#define PHY_REG_ID_2 3
+
+#define KS884X_PHY_AUTO_NEG_OFFSET 0x08
+
+/* Auto-Negotiation Advertisement Register */
+#define PHY_REG_AUTO_NEGOTIATION 4
+
+#define PHY_AUTO_NEG_NEXT_PAGE 0x8000
+#define PHY_AUTO_NEG_REMOTE_FAULT 0x2000
+/* Not supported. */
+#define PHY_AUTO_NEG_ASYM_PAUSE 0x0800
+#define PHY_AUTO_NEG_SYM_PAUSE 0x0400
+#define PHY_AUTO_NEG_100BT4 0x0200
+#define PHY_AUTO_NEG_100BTX_FD 0x0100
+#define PHY_AUTO_NEG_100BTX 0x0080
+#define PHY_AUTO_NEG_10BT_FD 0x0040
+#define PHY_AUTO_NEG_10BT 0x0020
+#define PHY_AUTO_NEG_SELECTOR 0x001F
+#define PHY_AUTO_NEG_802_3 0x0001
+
+#define PHY_AUTO_NEG_PAUSE (PHY_AUTO_NEG_SYM_PAUSE | PHY_AUTO_NEG_ASYM_PAUSE)
+
+#define KS884X_PHY_REMOTE_CAP_OFFSET 0x0A
+
+/* Auto-Negotiation Link Partner Ability Register */
+#define PHY_REG_REMOTE_CAPABILITY 5
+
+#define PHY_REMOTE_NEXT_PAGE 0x8000
+#define PHY_REMOTE_ACKNOWLEDGE 0x4000
+#define PHY_REMOTE_REMOTE_FAULT 0x2000
+#define PHY_REMOTE_SYM_PAUSE 0x0400
+#define PHY_REMOTE_100BTX_FD 0x0100
+#define PHY_REMOTE_100BTX 0x0080
+#define PHY_REMOTE_10BT_FD 0x0040
+#define PHY_REMOTE_10BT 0x0020
+
+/* P1VCT */
+#define KS884X_P1VCT_P 0x04F0
+#define KS884X_P1PHYCTRL_P 0x04F2
+
+/* P2VCT */
+#define KS884X_P2VCT_P 0x04F4
+#define KS884X_P2PHYCTRL_P 0x04F6
+
+#define KS884X_PHY_SPECIAL_OFFSET KS884X_P1VCT_P
+#define PHY_SPECIAL_INTERVAL (KS884X_P2VCT_P - KS884X_P1VCT_P)
+
+#define KS884X_PHY_LINK_MD_OFFSET 0x00
+
+#define PHY_START_CABLE_DIAG 0x8000
+#define PHY_CABLE_DIAG_RESULT 0x6000
+#define PHY_CABLE_STAT_NORMAL 0x0000
+#define PHY_CABLE_STAT_OPEN 0x2000
+#define PHY_CABLE_STAT_SHORT 0x4000
+#define PHY_CABLE_STAT_FAILED 0x6000
+#define PHY_CABLE_10M_SHORT 0x1000
+#define PHY_CABLE_FAULT_COUNTER 0x01FF
+
+#define KS884X_PHY_PHY_CTRL_OFFSET 0x02
+
+#define PHY_STAT_REVERSED_POLARITY 0x0020
+#define PHY_STAT_MDIX 0x0010
+#define PHY_FORCE_LINK 0x0008
+#define PHY_POWER_SAVING_DISABLE 0x0004
+#define PHY_REMOTE_LOOPBACK 0x0002
+
+/* SIDER */
+#define KS884X_SIDER_P 0x0400
+#define KS884X_CHIP_ID_OFFSET KS884X_SIDER_P
+#define KS884X_FAMILY_ID_OFFSET (KS884X_CHIP_ID_OFFSET + 1)
+
+#define REG_FAMILY_ID 0x88
+
+#define REG_CHIP_ID_41 0x8810
+#define REG_CHIP_ID_42 0x8800
+
+#define KS884X_CHIP_ID_MASK_41 0xFF10
+#define KS884X_CHIP_ID_MASK 0xFFF0
+#define KS884X_CHIP_ID_SHIFT 4
+#define KS884X_REVISION_MASK 0x000E
+#define KS884X_REVISION_SHIFT 1
+#define KS8842_START 0x0001
+
+#define CHIP_IP_41_M 0x8810
+#define CHIP_IP_42_M 0x8800
+#define CHIP_IP_61_M 0x8890
+#define CHIP_IP_62_M 0x8880
+
+#define CHIP_IP_41_P 0x8850
+#define CHIP_IP_42_P 0x8840
+#define CHIP_IP_61_P 0x88D0
+#define CHIP_IP_62_P 0x88C0
+
+/* SGCR1 */
+#define KS8842_SGCR1_P 0x0402
+#define KS8842_SWITCH_CTRL_1_OFFSET KS8842_SGCR1_P
+
+#define SWITCH_PASS_ALL 0x8000
+#define SWITCH_TX_FLOW_CTRL 0x2000
+#define SWITCH_RX_FLOW_CTRL 0x1000
+#define SWITCH_CHECK_LENGTH 0x0800
+#define SWITCH_AGING_ENABLE 0x0400
+#define SWITCH_FAST_AGING 0x0200
+#define SWITCH_AGGR_BACKOFF 0x0100
+#define SWITCH_PASS_PAUSE 0x0008
+#define SWITCH_LINK_AUTO_AGING 0x0001
+
+/* SGCR2 */
+#define KS8842_SGCR2_P 0x0404
+#define KS8842_SWITCH_CTRL_2_OFFSET KS8842_SGCR2_P
+
+#define SWITCH_VLAN_ENABLE 0x8000
+#define SWITCH_IGMP_SNOOP 0x4000
+#define IPV6_MLD_SNOOP_ENABLE 0x2000
+#define IPV6_MLD_SNOOP_OPTION 0x1000
+#define PRIORITY_SCHEME_SELECT 0x0800
+#define SWITCH_MIRROR_RX_TX 0x0100
+#define UNICAST_VLAN_BOUNDARY 0x0080
+#define MULTICAST_STORM_DISABLE 0x0040
+#define SWITCH_BACK_PRESSURE 0x0020
+#define FAIR_FLOW_CTRL 0x0010
+#define NO_EXC_COLLISION_DROP 0x0008
+#define SWITCH_HUGE_PACKET 0x0004
+#define SWITCH_LEGAL_PACKET 0x0002
+#define SWITCH_BUF_RESERVE 0x0001
+
+/* SGCR3 */
+#define KS8842_SGCR3_P 0x0406
+#define KS8842_SWITCH_CTRL_3_OFFSET KS8842_SGCR3_P
+
+#define BROADCAST_STORM_RATE_LO 0xFF00
+#define SWITCH_REPEATER 0x0080
+#define SWITCH_HALF_DUPLEX 0x0040
+#define SWITCH_FLOW_CTRL 0x0020
+#define SWITCH_10_MBIT 0x0010
+#define SWITCH_REPLACE_NULL_VID 0x0008
+#define BROADCAST_STORM_RATE_HI 0x0007
+
+#define BROADCAST_STORM_RATE 0x07FF
+
+/* SGCR4 */
+#define KS8842_SGCR4_P 0x0408
+
+/* SGCR5 */
+#define KS8842_SGCR5_P 0x040A
+#define KS8842_SWITCH_CTRL_5_OFFSET KS8842_SGCR5_P
+
+#define LED_MODE 0x8200
+#define LED_SPEED_DUPLEX_ACT 0x0000
+#define LED_SPEED_DUPLEX_LINK_ACT 0x8000
+#define LED_DUPLEX_10_100 0x0200
+
+/* SGCR6 */
+#define KS8842_SGCR6_P 0x0410
+#define KS8842_SWITCH_CTRL_6_OFFSET KS8842_SGCR6_P
+
+#define KS8842_PRIORITY_MASK 3
+#define KS8842_PRIORITY_SHIFT 2
+
+/* SGCR7 */
+#define KS8842_SGCR7_P 0x0412
+#define KS8842_SWITCH_CTRL_7_OFFSET KS8842_SGCR7_P
+
+#define SWITCH_UNK_DEF_PORT_ENABLE 0x0008
+#define SWITCH_UNK_DEF_PORT_3 0x0004
+#define SWITCH_UNK_DEF_PORT_2 0x0002
+#define SWITCH_UNK_DEF_PORT_1 0x0001
+
+/* MACAR1 */
+#define KS8842_MACAR1_P 0x0470
+#define KS8842_MACAR2_P 0x0472
+#define KS8842_MACAR3_P 0x0474
+#define KS8842_MAC_ADDR_1_OFFSET KS8842_MACAR1_P
+#define KS8842_MAC_ADDR_0_OFFSET (KS8842_MAC_ADDR_1_OFFSET + 1)
+#define KS8842_MAC_ADDR_3_OFFSET KS8842_MACAR2_P
+#define KS8842_MAC_ADDR_2_OFFSET (KS8842_MAC_ADDR_3_OFFSET + 1)
+#define KS8842_MAC_ADDR_5_OFFSET KS8842_MACAR3_P
+#define KS8842_MAC_ADDR_4_OFFSET (KS8842_MAC_ADDR_5_OFFSET + 1)
+
+/* TOSR1 */
+#define KS8842_TOSR1_P 0x0480
+#define KS8842_TOSR2_P 0x0482
+#define KS8842_TOSR3_P 0x0484
+#define KS8842_TOSR4_P 0x0486
+#define KS8842_TOSR5_P 0x0488
+#define KS8842_TOSR6_P 0x048A
+#define KS8842_TOSR7_P 0x0490
+#define KS8842_TOSR8_P 0x0492
+#define KS8842_TOS_1_OFFSET KS8842_TOSR1_P
+#define KS8842_TOS_2_OFFSET KS8842_TOSR2_P
+#define KS8842_TOS_3_OFFSET KS8842_TOSR3_P
+#define KS8842_TOS_4_OFFSET KS8842_TOSR4_P
+#define KS8842_TOS_5_OFFSET KS8842_TOSR5_P
+#define KS8842_TOS_6_OFFSET KS8842_TOSR6_P
+
+#define KS8842_TOS_7_OFFSET KS8842_TOSR7_P
+#define KS8842_TOS_8_OFFSET KS8842_TOSR8_P
+
+/* P1CR1 */
+#define KS8842_P1CR1_P 0x0500
+#define KS8842_P1CR2_P 0x0502
+#define KS8842_P1VIDR_P 0x0504
+#define KS8842_P1CR3_P 0x0506
+#define KS8842_P1IRCR_P 0x0508
+#define KS8842_P1ERCR_P 0x050A
+#define KS884X_P1SCSLMD_P 0x0510
+#define KS884X_P1CR4_P 0x0512
+#define KS884X_P1SR_P 0x0514
+
+/* P2CR1 */
+#define KS8842_P2CR1_P 0x0520
+#define KS8842_P2CR2_P 0x0522
+#define KS8842_P2VIDR_P 0x0524
+#define KS8842_P2CR3_P 0x0526
+#define KS8842_P2IRCR_P 0x0528
+#define KS8842_P2ERCR_P 0x052A
+#define KS884X_P2SCSLMD_P 0x0530
+#define KS884X_P2CR4_P 0x0532
+#define KS884X_P2SR_P 0x0534
+
+/* P3CR1 */
+#define KS8842_P3CR1_P 0x0540
+#define KS8842_P3CR2_P 0x0542
+#define KS8842_P3VIDR_P 0x0544
+#define KS8842_P3CR3_P 0x0546
+#define KS8842_P3IRCR_P 0x0548
+#define KS8842_P3ERCR_P 0x054A
+
+#define KS8842_PORT_1_CTRL_1 KS8842_P1CR1_P
+#define KS8842_PORT_2_CTRL_1 KS8842_P2CR1_P
+#define KS8842_PORT_3_CTRL_1 KS8842_P3CR1_P
+
+#define PORT_CTRL_ADDR(port, addr) \
+ (addr = KS8842_PORT_1_CTRL_1 + (port) * \
+ (KS8842_PORT_2_CTRL_1 - KS8842_PORT_1_CTRL_1))
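Editor's note: PORT_CTRL_ADDR() derives a port's register base from the fixed spacing between the per-port banks: KS8842_PORT_2_CTRL_1 - KS8842_PORT_1_CTRL_1 = 0x0520 - 0x0500 = 0x20 bytes per port. For example (addr is the macro's output parameter):

        u32 addr;

        PORT_CTRL_ADDR(0, addr);        /* port 0: 0x0500 (P1CR1) */
        PORT_CTRL_ADDR(1, addr);        /* port 1: 0x0500 + 0x20 = 0x0520 (P2CR1) */
        PORT_CTRL_ADDR(2, addr);        /* host port: 0x0500 + 0x40 = 0x0540 (P3CR1) */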
+
+#define KS8842_PORT_CTRL_1_OFFSET 0x00
+
+#define PORT_BROADCAST_STORM 0x0080
+#define PORT_DIFFSERV_ENABLE 0x0040
+#define PORT_802_1P_ENABLE 0x0020
+#define PORT_BASED_PRIORITY_MASK 0x0018
+#define PORT_BASED_PRIORITY_BASE 0x0003
+#define PORT_BASED_PRIORITY_SHIFT 3
+#define PORT_BASED_PRIORITY_0 0x0000
+#define PORT_BASED_PRIORITY_1 0x0008
+#define PORT_BASED_PRIORITY_2 0x0010
+#define PORT_BASED_PRIORITY_3 0x0018
+#define PORT_INSERT_TAG 0x0004
+#define PORT_REMOVE_TAG 0x0002
+#define PORT_PRIO_QUEUE_ENABLE 0x0001
+
+#define KS8842_PORT_CTRL_2_OFFSET 0x02
+
+#define PORT_INGRESS_VLAN_FILTER 0x4000
+#define PORT_DISCARD_NON_VID 0x2000
+#define PORT_FORCE_FLOW_CTRL 0x1000
+#define PORT_BACK_PRESSURE 0x0800
+#define PORT_TX_ENABLE 0x0400
+#define PORT_RX_ENABLE 0x0200
+#define PORT_LEARN_DISABLE 0x0100
+#define PORT_MIRROR_SNIFFER 0x0080
+#define PORT_MIRROR_RX 0x0040
+#define PORT_MIRROR_TX 0x0020
+#define PORT_USER_PRIORITY_CEILING 0x0008
+#define PORT_VLAN_MEMBERSHIP 0x0007
+
+#define KS8842_PORT_CTRL_VID_OFFSET 0x04
+
+#define PORT_DEFAULT_VID 0x0001
+
+#define KS8842_PORT_CTRL_3_OFFSET 0x06
+
+#define PORT_INGRESS_LIMIT_MODE 0x000C
+#define PORT_INGRESS_ALL 0x0000
+#define PORT_INGRESS_UNICAST 0x0004
+#define PORT_INGRESS_MULTICAST 0x0008
+#define PORT_INGRESS_BROADCAST 0x000C
+#define PORT_COUNT_IFG 0x0002
+#define PORT_COUNT_PREAMBLE 0x0001
+
+#define KS8842_PORT_IN_RATE_OFFSET 0x08
+#define KS8842_PORT_OUT_RATE_OFFSET 0x0A
+
+#define PORT_PRIORITY_RATE 0x0F
+#define PORT_PRIORITY_RATE_SHIFT 4
+
+#define KS884X_PORT_LINK_MD 0x10
+
+#define PORT_CABLE_10M_SHORT 0x8000
+#define PORT_CABLE_DIAG_RESULT 0x6000
+#define PORT_CABLE_STAT_NORMAL 0x0000
+#define PORT_CABLE_STAT_OPEN 0x2000
+#define PORT_CABLE_STAT_SHORT 0x4000
+#define PORT_CABLE_STAT_FAILED 0x6000
+#define PORT_START_CABLE_DIAG 0x1000
+#define PORT_FORCE_LINK 0x0800
+#define PORT_POWER_SAVING_DISABLE 0x0400
+#define PORT_PHY_REMOTE_LOOPBACK 0x0200
+#define PORT_CABLE_FAULT_COUNTER 0x01FF
+
+#define KS884X_PORT_CTRL_4_OFFSET 0x12
+
+#define PORT_LED_OFF 0x8000
+#define PORT_TX_DISABLE 0x4000
+#define PORT_AUTO_NEG_RESTART 0x2000
+#define PORT_REMOTE_FAULT_DISABLE 0x1000
+#define PORT_POWER_DOWN 0x0800
+#define PORT_AUTO_MDIX_DISABLE 0x0400
+#define PORT_FORCE_MDIX 0x0200
+#define PORT_LOOPBACK 0x0100
+#define PORT_AUTO_NEG_ENABLE 0x0080
+#define PORT_FORCE_100_MBIT 0x0040
+#define PORT_FORCE_FULL_DUPLEX 0x0020
+#define PORT_AUTO_NEG_SYM_PAUSE 0x0010
+#define PORT_AUTO_NEG_100BTX_FD 0x0008
+#define PORT_AUTO_NEG_100BTX 0x0004
+#define PORT_AUTO_NEG_10BT_FD 0x0002
+#define PORT_AUTO_NEG_10BT 0x0001
+
+#define KS884X_PORT_STATUS_OFFSET 0x14
+
+#define PORT_HP_MDIX 0x8000
+#define PORT_REVERSED_POLARITY 0x2000
+#define PORT_RX_FLOW_CTRL 0x0800
+#define PORT_TX_FLOW_CTRL 0x1000
+#define PORT_STATUS_SPEED_100MBIT 0x0400
+#define PORT_STATUS_FULL_DUPLEX 0x0200
+#define PORT_REMOTE_FAULT 0x0100
+#define PORT_MDIX_STATUS 0x0080
+#define PORT_AUTO_NEG_COMPLETE 0x0040
+#define PORT_STATUS_LINK_GOOD 0x0020
+#define PORT_REMOTE_SYM_PAUSE 0x0010
+#define PORT_REMOTE_100BTX_FD 0x0008
+#define PORT_REMOTE_100BTX 0x0004
+#define PORT_REMOTE_10BT_FD 0x0002
+#define PORT_REMOTE_10BT 0x0001
+
+/*
+#define STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
+#define STATIC_MAC_TABLE_FWD_PORTS 00-00070000-00000000
+#define STATIC_MAC_TABLE_VALID 00-00080000-00000000
+#define STATIC_MAC_TABLE_OVERRIDE 00-00100000-00000000
+#define STATIC_MAC_TABLE_USE_FID 00-00200000-00000000
+#define STATIC_MAC_TABLE_FID 00-03C00000-00000000
+*/
+
+#define STATIC_MAC_TABLE_ADDR 0x0000FFFF
+#define STATIC_MAC_TABLE_FWD_PORTS 0x00070000
+#define STATIC_MAC_TABLE_VALID 0x00080000
+#define STATIC_MAC_TABLE_OVERRIDE 0x00100000
+#define STATIC_MAC_TABLE_USE_FID 0x00200000
+#define STATIC_MAC_TABLE_FID 0x03C00000
+
+#define STATIC_MAC_FWD_PORTS_SHIFT 16
+#define STATIC_MAC_FID_SHIFT 22
+
+/*
+#define VLAN_TABLE_VID 00-00000000-00000FFF
+#define VLAN_TABLE_FID 00-00000000-0000F000
+#define VLAN_TABLE_MEMBERSHIP 00-00000000-00070000
+#define VLAN_TABLE_VALID 00-00000000-00080000
+*/
+
+#define VLAN_TABLE_VID 0x00000FFF
+#define VLAN_TABLE_FID 0x0000F000
+#define VLAN_TABLE_MEMBERSHIP 0x00070000
+#define VLAN_TABLE_VALID 0x00080000
+
+#define VLAN_TABLE_FID_SHIFT 12
+#define VLAN_TABLE_MEMBERSHIP_SHIFT 16
+
+/*
+#define DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
+#define DYNAMIC_MAC_TABLE_FID 00-000F0000-00000000
+#define DYNAMIC_MAC_TABLE_SRC_PORT 00-00300000-00000000
+#define DYNAMIC_MAC_TABLE_TIMESTAMP 00-00C00000-00000000
+#define DYNAMIC_MAC_TABLE_ENTRIES 03-FF000000-00000000
+#define DYNAMIC_MAC_TABLE_MAC_EMPTY 04-00000000-00000000
+#define DYNAMIC_MAC_TABLE_RESERVED 78-00000000-00000000
+#define DYNAMIC_MAC_TABLE_NOT_READY 80-00000000-00000000
+*/
+
+#define DYNAMIC_MAC_TABLE_ADDR 0x0000FFFF
+#define DYNAMIC_MAC_TABLE_FID 0x000F0000
+#define DYNAMIC_MAC_TABLE_SRC_PORT 0x00300000
+#define DYNAMIC_MAC_TABLE_TIMESTAMP 0x00C00000
+#define DYNAMIC_MAC_TABLE_ENTRIES 0xFF000000
+
+#define DYNAMIC_MAC_TABLE_ENTRIES_H 0x03
+#define DYNAMIC_MAC_TABLE_MAC_EMPTY 0x04
+#define DYNAMIC_MAC_TABLE_RESERVED 0x78
+#define DYNAMIC_MAC_TABLE_NOT_READY 0x80
+
+#define DYNAMIC_MAC_FID_SHIFT 16
+#define DYNAMIC_MAC_SRC_PORT_SHIFT 20
+#define DYNAMIC_MAC_TIMESTAMP_SHIFT 22
+#define DYNAMIC_MAC_ENTRIES_SHIFT 24
+#define DYNAMIC_MAC_ENTRIES_H_SHIFT 8
+
+/*
+#define MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
+#define MIB_COUNTER_VALID 00-00000000-40000000
+#define MIB_COUNTER_OVERFLOW 00-00000000-80000000
+*/
+
+#define MIB_COUNTER_VALUE 0x3FFFFFFF
+#define MIB_COUNTER_VALID 0x40000000
+#define MIB_COUNTER_OVERFLOW 0x80000000
+
+#define MIB_PACKET_DROPPED 0x0000FFFF
+
+#define KS_MIB_PACKET_DROPPED_TX_0 0x100
+#define KS_MIB_PACKET_DROPPED_TX_1 0x101
+#define KS_MIB_PACKET_DROPPED_TX 0x102
+#define KS_MIB_PACKET_DROPPED_RX_0 0x103
+#define KS_MIB_PACKET_DROPPED_RX_1 0x104
+#define KS_MIB_PACKET_DROPPED_RX 0x105
+
+/* Change default LED mode. */
+#define SET_DEFAULT_LED LED_SPEED_DUPLEX_ACT
+
+#define MAC_ADDR_LEN 6
+#define MAC_ADDR_ORDER(i) (MAC_ADDR_LEN - 1 - (i))
+
+#define MAX_ETHERNET_BODY_SIZE 1500
+#define ETHERNET_HEADER_SIZE 14
+
+#define MAX_ETHERNET_PACKET_SIZE \
+ (MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE)
+
+#define REGULAR_RX_BUF_SIZE (MAX_ETHERNET_PACKET_SIZE + 4)
+#define MAX_RX_BUF_SIZE (1912 + 4)
+
+#define ADDITIONAL_ENTRIES 16
+#define MAX_MULTICAST_LIST 32
+
+#define HW_MULTICAST_SIZE 8
+
+#define HW_TO_DEV_PORT(port) (port - 1)
+
+enum {
+ media_connected,
+ media_disconnected
+};
+
+enum {
+ OID_COUNTER_UNKOWN,
+
+ OID_COUNTER_FIRST,
+
+ /* total transmit errors */
+ OID_COUNTER_XMIT_ERROR,
+
+ /* total receive errors */
+ OID_COUNTER_RCV_ERROR,
+
+ OID_COUNTER_LAST
+};
+
+/*
+ * Hardware descriptor definitions
+ */
+
+#define DESC_ALIGNMENT 16
+#define BUFFER_ALIGNMENT 8
+
+#define NUM_OF_RX_DESC 64
+#define NUM_OF_TX_DESC 64
+
+#define KS_DESC_RX_FRAME_LEN 0x000007FF
+#define KS_DESC_RX_FRAME_TYPE 0x00008000
+#define KS_DESC_RX_ERROR_CRC 0x00010000
+#define KS_DESC_RX_ERROR_RUNT 0x00020000
+#define KS_DESC_RX_ERROR_TOO_LONG 0x00040000
+#define KS_DESC_RX_ERROR_PHY 0x00080000
+#define KS884X_DESC_RX_PORT_MASK 0x00300000
+#define KS_DESC_RX_MULTICAST 0x01000000
+#define KS_DESC_RX_ERROR 0x02000000
+#define KS_DESC_RX_ERROR_CSUM_UDP 0x04000000
+#define KS_DESC_RX_ERROR_CSUM_TCP 0x08000000
+#define KS_DESC_RX_ERROR_CSUM_IP 0x10000000
+#define KS_DESC_RX_LAST 0x20000000
+#define KS_DESC_RX_FIRST 0x40000000
+#define KS_DESC_RX_ERROR_COND \
+ (KS_DESC_RX_ERROR_CRC | \
+ KS_DESC_RX_ERROR_RUNT | \
+ KS_DESC_RX_ERROR_PHY | \
+ KS_DESC_RX_ERROR_TOO_LONG)
+
+#define KS_DESC_HW_OWNED 0x80000000
+
+#define KS_DESC_BUF_SIZE 0x000007FF
+#define KS884X_DESC_TX_PORT_MASK 0x00300000
+#define KS_DESC_END_OF_RING 0x02000000
+#define KS_DESC_TX_CSUM_GEN_UDP 0x04000000
+#define KS_DESC_TX_CSUM_GEN_TCP 0x08000000
+#define KS_DESC_TX_CSUM_GEN_IP 0x10000000
+#define KS_DESC_TX_LAST 0x20000000
+#define KS_DESC_TX_FIRST 0x40000000
+#define KS_DESC_TX_INTERRUPT 0x80000000
+
+#define KS_DESC_PORT_SHIFT 20
+
+#define KS_DESC_RX_MASK (KS_DESC_BUF_SIZE)
+
+#define KS_DESC_TX_MASK \
+ (KS_DESC_TX_INTERRUPT | \
+ KS_DESC_TX_FIRST | \
+ KS_DESC_TX_LAST | \
+ KS_DESC_TX_CSUM_GEN_IP | \
+ KS_DESC_TX_CSUM_GEN_TCP | \
+ KS_DESC_TX_CSUM_GEN_UDP | \
+ KS_DESC_BUF_SIZE)
+
+struct ksz_desc_rx_stat {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 hw_owned:1;
+ u32 first_desc:1;
+ u32 last_desc:1;
+ u32 csum_err_ip:1;
+ u32 csum_err_tcp:1;
+ u32 csum_err_udp:1;
+ u32 error:1;
+ u32 multicast:1;
+ u32 src_port:4;
+ u32 err_phy:1;
+ u32 err_too_long:1;
+ u32 err_runt:1;
+ u32 err_crc:1;
+ u32 frame_type:1;
+ u32 reserved1:4;
+ u32 frame_len:11;
+#else
+ u32 frame_len:11;
+ u32 reserved1:4;
+ u32 frame_type:1;
+ u32 err_crc:1;
+ u32 err_runt:1;
+ u32 err_too_long:1;
+ u32 err_phy:1;
+ u32 src_port:4;
+ u32 multicast:1;
+ u32 error:1;
+ u32 csum_err_udp:1;
+ u32 csum_err_tcp:1;
+ u32 csum_err_ip:1;
+ u32 last_desc:1;
+ u32 first_desc:1;
+ u32 hw_owned:1;
+#endif
+};
+
+struct ksz_desc_tx_stat {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 hw_owned:1;
+ u32 reserved1:31;
+#else
+ u32 reserved1:31;
+ u32 hw_owned:1;
+#endif
+};
+
+struct ksz_desc_rx_buf {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 reserved4:6;
+ u32 end_of_ring:1;
+ u32 reserved3:14;
+ u32 buf_size:11;
+#else
+ u32 buf_size:11;
+ u32 reserved3:14;
+ u32 end_of_ring:1;
+ u32 reserved4:6;
+#endif
+};
+
+struct ksz_desc_tx_buf {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 intr:1;
+ u32 first_seg:1;
+ u32 last_seg:1;
+ u32 csum_gen_ip:1;
+ u32 csum_gen_tcp:1;
+ u32 csum_gen_udp:1;
+ u32 end_of_ring:1;
+ u32 reserved4:1;
+ u32 dest_port:4;
+ u32 reserved3:9;
+ u32 buf_size:11;
+#else
+ u32 buf_size:11;
+ u32 reserved3:9;
+ u32 dest_port:4;
+ u32 reserved4:1;
+ u32 end_of_ring:1;
+ u32 csum_gen_udp:1;
+ u32 csum_gen_tcp:1;
+ u32 csum_gen_ip:1;
+ u32 last_seg:1;
+ u32 first_seg:1;
+ u32 intr:1;
+#endif
+};
+
+union desc_stat {
+ struct ksz_desc_rx_stat rx;
+ struct ksz_desc_tx_stat tx;
+ u32 data;
+};
+
+union desc_buf {
+ struct ksz_desc_rx_buf rx;
+ struct ksz_desc_tx_buf tx;
+ u32 data;
+};
+
+/**
+ * struct ksz_hw_desc - Hardware descriptor data structure
+ * @ctrl: Descriptor control value.
+ * @buf: Descriptor buffer value.
+ * @addr: Physical address of memory buffer.
+ * @next: Pointer to next hardware descriptor.
+ */
+struct ksz_hw_desc {
+ union desc_stat ctrl;
+ union desc_buf buf;
+ u32 addr;
+ u32 next;
+};
+
+/**
+ * struct ksz_sw_desc - Software descriptor data structure
+ * @ctrl: Descriptor control value.
+ * @buf: Descriptor buffer value.
+ * @buf_size: Current buffer size value in the hardware descriptor.
+ */
+struct ksz_sw_desc {
+ union desc_stat ctrl;
+ union desc_buf buf;
+ u32 buf_size;
+};
+
+/**
+ * struct ksz_dma_buf - OS dependent DMA buffer data structure
+ * @skb: Associated socket buffer.
+ * @dma: Associated physical DMA address.
+ * @len: Actual length used.
+ */
+struct ksz_dma_buf {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ int len;
+};
+
+/**
+ * struct ksz_desc - Descriptor structure
+ * @phw: Hardware descriptor pointer to uncached physical memory.
+ * @sw: Cached memory to hold hardware descriptor values for
+ * manipulation.
+ * @dma_buf: Operating system dependent data structure to hold physical
+ * memory buffer allocation information.
+ */
+struct ksz_desc {
+ struct ksz_hw_desc *phw;
+ struct ksz_sw_desc sw;
+ struct ksz_dma_buf dma_buf;
+};
+
+#define DMA_BUFFER(desc) ((struct ksz_dma_buf *)(&(desc)->dma_buf))
+
+/**
+ * struct ksz_desc_info - Descriptor information data structure
+ * @ring: First descriptor in the ring.
+ * @cur: Current descriptor being manipulated.
+ * @ring_virt: First hardware descriptor in the ring.
+ * @ring_phys: The physical address of the first descriptor of the ring.
+ * @size: Size of hardware descriptor.
+ * @alloc: Number of descriptors allocated.
+ * @avail: Number of descriptors available for use.
+ * @last: Index for last descriptor released to hardware.
+ * @next: Index for next descriptor available for use.
+ * @mask: Mask for index wrapping.
+ */
+struct ksz_desc_info {
+ struct ksz_desc *ring;
+ struct ksz_desc *cur;
+ struct ksz_hw_desc *ring_virt;
+ u32 ring_phys;
+ int size;
+ int alloc;
+ int avail;
+ int last;
+ int next;
+ int mask;
+};
+
+/*
+ * KSZ8842 switch definitions
+ */
+
+enum {
+ TABLE_STATIC_MAC = 0,
+ TABLE_VLAN,
+ TABLE_DYNAMIC_MAC,
+ TABLE_MIB
+};
+
+#define LEARNED_MAC_TABLE_ENTRIES 1024
+#define STATIC_MAC_TABLE_ENTRIES 8
+
+/**
+ * struct ksz_mac_table - Static MAC table data structure
+ * @mac_addr: MAC address to filter.
+ * @vid: VID value.
+ * @fid: FID value.
+ * @ports: Port membership.
+ * @override: Override setting.
+ * @use_fid: FID use setting.
+ * @valid: Valid setting indicating the entry is being used.
+ */
+struct ksz_mac_table {
+ u8 mac_addr[MAC_ADDR_LEN];
+ u16 vid;
+ u8 fid;
+ u8 ports;
+ u8 override:1;
+ u8 use_fid:1;
+ u8 valid:1;
+};
+
+#define VLAN_TABLE_ENTRIES 16
+
+/**
+ * struct ksz_vlan_table - VLAN table data structure
+ * @vid: VID value.
+ * @fid: FID value.
+ * @member: Port membership.
+ */
+struct ksz_vlan_table {
+ u16 vid;
+ u8 fid;
+ u8 member;
+};
+
+#define DIFFSERV_ENTRIES 64
+#define PRIO_802_1P_ENTRIES 8
+#define PRIO_QUEUES 4
+
+#define SWITCH_PORT_NUM 2
+#define TOTAL_PORT_NUM (SWITCH_PORT_NUM + 1)
+#define HOST_MASK (1 << SWITCH_PORT_NUM)
+#define PORT_MASK 7
+
+#define MAIN_PORT 0
+#define OTHER_PORT 1
+#define HOST_PORT SWITCH_PORT_NUM
+
+#define PORT_COUNTER_NUM 0x20
+#define TOTAL_PORT_COUNTER_NUM (PORT_COUNTER_NUM + 2)
+
+#define MIB_COUNTER_RX_LO_PRIORITY 0x00
+#define MIB_COUNTER_RX_HI_PRIORITY 0x01
+#define MIB_COUNTER_RX_UNDERSIZE 0x02
+#define MIB_COUNTER_RX_FRAGMENT 0x03
+#define MIB_COUNTER_RX_OVERSIZE 0x04
+#define MIB_COUNTER_RX_JABBER 0x05
+#define MIB_COUNTER_RX_SYMBOL_ERR 0x06
+#define MIB_COUNTER_RX_CRC_ERR 0x07
+#define MIB_COUNTER_RX_ALIGNMENT_ERR 0x08
+#define MIB_COUNTER_RX_CTRL_8808 0x09
+#define MIB_COUNTER_RX_PAUSE 0x0A
+#define MIB_COUNTER_RX_BROADCAST 0x0B
+#define MIB_COUNTER_RX_MULTICAST 0x0C
+#define MIB_COUNTER_RX_UNICAST 0x0D
+#define MIB_COUNTER_RX_OCTET_64 0x0E
+#define MIB_COUNTER_RX_OCTET_65_127 0x0F
+#define MIB_COUNTER_RX_OCTET_128_255 0x10
+#define MIB_COUNTER_RX_OCTET_256_511 0x11
+#define MIB_COUNTER_RX_OCTET_512_1023 0x12
+#define MIB_COUNTER_RX_OCTET_1024_1522 0x13
+#define MIB_COUNTER_TX_LO_PRIORITY 0x14
+#define MIB_COUNTER_TX_HI_PRIORITY 0x15
+#define MIB_COUNTER_TX_LATE_COLLISION 0x16
+#define MIB_COUNTER_TX_PAUSE 0x17
+#define MIB_COUNTER_TX_BROADCAST 0x18
+#define MIB_COUNTER_TX_MULTICAST 0x19
+#define MIB_COUNTER_TX_UNICAST 0x1A
+#define MIB_COUNTER_TX_DEFERRED 0x1B
+#define MIB_COUNTER_TX_TOTAL_COLLISION 0x1C
+#define MIB_COUNTER_TX_EXCESS_COLLISION 0x1D
+#define MIB_COUNTER_TX_SINGLE_COLLISION 0x1E
+#define MIB_COUNTER_TX_MULTI_COLLISION 0x1F
+
+#define MIB_COUNTER_RX_DROPPED_PACKET 0x20
+#define MIB_COUNTER_TX_DROPPED_PACKET 0x21
+
+/**
+ * struct ksz_port_mib - Port MIB data structure
+ * @cnt_ptr: Current pointer to MIB counter index.
+ * @link_down: Indication the link has just gone down.
+ * @state: Connection status of the port.
+ * @mib_start: The starting counter index. Some ports do not start at 0.
+ * @counter: 64-bit MIB counter value.
+ * @dropped: Temporary buffer to remember last read packet dropped values.
+ *
+ * MIB counters need to be read periodically so that they do not overflow
+ * and report incorrect values. The right balance is needed to satisfy this
+ * condition without wasting too much CPU time.
+ *
+ * It is pointless to read MIB counters when the port is disconnected. The
+ * @state provides the connection status so that MIB counters are read only
+ * when the port is connected. The @link_down indicates the port has just been
+ * disconnected, so all MIB counters are read one last time to update the
+ * information.
+ */
+struct ksz_port_mib {
+ u8 cnt_ptr;
+ u8 link_down;
+ u8 state;
+ u8 mib_start;
+
+ u64 counter[TOTAL_PORT_COUNTER_NUM];
+ u32 dropped[2];
+};
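Editor's note: to make the reading policy described above concrete, a minimal polling sketch (illustrative only; it relies on the port_r_cnt() helper defined further down in this file and ignores the locking and work scheduling the real driver needs):

        static void mib_poll_sketch(struct ksz_hw *hw)
        {
                int port;

                for (port = 0; port < hw->mib_port_cnt; port++) {
                        struct ksz_port_mib *mib = &hw->port_mib[port];

                        /* Skip disconnected ports, but read one last time
                         * right after a link-down event. */
                        if (media_connected == mib->state || mib->link_down) {
                                port_r_cnt(hw, port);
                                mib->link_down = 0;
                        }
                }
        }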
+
+/**
+ * struct ksz_port_cfg - Port configuration data structure
+ * @vid: VID value.
+ * @member: Port membership.
+ * @port_prio: Port priority.
+ * @rx_rate: Receive priority rate.
+ * @tx_rate: Transmit priority rate.
+ * @stp_state: Current Spanning Tree Protocol state.
+ */
+struct ksz_port_cfg {
+ u16 vid;
+ u8 member;
+ u8 port_prio;
+ u32 rx_rate[PRIO_QUEUES];
+ u32 tx_rate[PRIO_QUEUES];
+ int stp_state;
+};
+
+/**
+ * struct ksz_switch - KSZ8842 switch data structure
+ * @mac_table: MAC table entries information.
+ * @vlan_table: VLAN table entries information.
+ * @port_cfg: Port configuration information.
+ * @diffserv: DiffServ priority settings. Possible values come from the 6-bit
+ * ToS (bit7 ~ bit2) field.
+ * @p_802_1p: 802.1P priority settings. Possible values come from the 3-bit
+ * 802.1p tag priority field.
+ * @br_addr: Bridge address. Used for STP.
+ * @other_addr: Other MAC address. Used for multiple network device mode.
+ * @broad_per: Broadcast storm percentage.
+ * @member: Current port membership. Used for STP.
+ */
+struct ksz_switch {
+ struct ksz_mac_table mac_table[STATIC_MAC_TABLE_ENTRIES];
+ struct ksz_vlan_table vlan_table[VLAN_TABLE_ENTRIES];
+ struct ksz_port_cfg port_cfg[TOTAL_PORT_NUM];
+
+ u8 diffserv[DIFFSERV_ENTRIES];
+ u8 p_802_1p[PRIO_802_1P_ENTRIES];
+
+ u8 br_addr[MAC_ADDR_LEN];
+ u8 other_addr[MAC_ADDR_LEN];
+
+ u8 broad_per;
+ u8 member;
+};
+
+#define TX_RATE_UNIT 10000
+
+/**
+ * struct ksz_port_info - Port information data structure
+ * @state: Connection status of the port.
+ * @tx_rate: Transmit rate divided by 10000 to get Mbit.
+ * @duplex: Duplex mode.
+ * @advertised: Advertised auto-negotiation setting. Used to determine link.
+ * @partner: Auto-negotiation partner setting. Used to determine link.
+ * @port_id: Port index to access actual hardware register.
+ * @pdev: Pointer to OS dependent network device.
+ */
+struct ksz_port_info {
+ uint state;
+ uint tx_rate;
+ u8 duplex;
+ u8 advertised;
+ u8 partner;
+ u8 port_id;
+ void *pdev;
+};
+
+#define MAX_TX_HELD_SIZE 52000
+
+/* Hardware features and bug fixes. */
+#define LINK_INT_WORKING (1 << 0)
+#define SMALL_PACKET_TX_BUG (1 << 1)
+#define HALF_DUPLEX_SIGNAL_BUG (1 << 2)
+#define IPV6_CSUM_GEN_HACK (1 << 3)
+#define RX_HUGE_FRAME (1 << 4)
+#define STP_SUPPORT (1 << 8)
+
+/* Software overrides. */
+#define PAUSE_FLOW_CTRL (1 << 0)
+#define FAST_AGING (1 << 1)
+
+/**
+ * struct ksz_hw - KSZ884X hardware data structure
+ * @io: Virtual address assigned.
+ * @ksz_switch: Pointer to KSZ8842 switch.
+ * @port_info: Port information.
+ * @port_mib: Port MIB information.
+ * @dev_count: Number of network devices this hardware supports.
+ * @dst_ports: Destination ports in switch for transmission.
+ * @id: Hardware ID. Used for display only.
+ * @mib_cnt: Number of MIB counters this hardware has.
+ * @mib_port_cnt: Number of ports with MIB counters.
+ * @tx_cfg: Cached transmit control settings.
+ * @rx_cfg: Cached receive control settings.
+ * @intr_mask: Current interrupt mask.
+ * @intr_set: Current interrupt set.
+ * @intr_blocked: Interrupt blocked.
+ * @rx_desc_info: Receive descriptor information.
+ * @tx_desc_info: Transmit descriptor information.
+ * @tx_int_cnt: Transmit interrupt count. Used for TX optimization.
+ * @tx_int_mask: Transmit interrupt mask. Used for TX optimization.
+ * @tx_size: Transmit data size. Used for TX optimization.
+ * The maximum is defined by MAX_TX_HELD_SIZE.
+ * @perm_addr: Permanent MAC address.
+ * @override_addr: Overridden MAC address.
+ * @address: Additional MAC address entries.
+ * @addr_list_size: Additional MAC address list size.
+ * @mac_override: Indication that the MAC address has been overridden.
+ * @promiscuous: Counter to keep track of promiscuous mode set.
+ * @all_multi: Counter to keep track of all multicast mode set.
+ * @multi_list: Multicast address entries.
+ * @multi_bits: Cached multicast hash table settings.
+ * @multi_list_size: Multicast address list size.
+ * @enabled: Indication of hardware enabled.
+ * @rx_stop: Indication of receive process stop.
+ * @features: Hardware features to enable.
+ * @overrides: Hardware features to override.
+ * @parent: Pointer to parent, network device private structure.
+ */
+struct ksz_hw {
+ void __iomem *io;
+
+ struct ksz_switch *ksz_switch;
+ struct ksz_port_info port_info[SWITCH_PORT_NUM];
+ struct ksz_port_mib port_mib[TOTAL_PORT_NUM];
+ int dev_count;
+ int dst_ports;
+ int id;
+ int mib_cnt;
+ int mib_port_cnt;
+
+ u32 tx_cfg;
+ u32 rx_cfg;
+ u32 intr_mask;
+ u32 intr_set;
+ uint intr_blocked;
+
+ struct ksz_desc_info rx_desc_info;
+ struct ksz_desc_info tx_desc_info;
+
+ int tx_int_cnt;
+ int tx_int_mask;
+ int tx_size;
+
+ u8 perm_addr[MAC_ADDR_LEN];
+ u8 override_addr[MAC_ADDR_LEN];
+ u8 address[ADDITIONAL_ENTRIES][MAC_ADDR_LEN];
+ u8 addr_list_size;
+ u8 mac_override;
+ u8 promiscuous;
+ u8 all_multi;
+ u8 multi_list[MAX_MULTICAST_LIST][MAC_ADDR_LEN];
+ u8 multi_bits[HW_MULTICAST_SIZE];
+ u8 multi_list_size;
+
+ u8 enabled;
+ u8 rx_stop;
+ u8 reserved2[1];
+
+ uint features;
+ uint overrides;
+
+ void *parent;
+};
+
+enum {
+ PHY_NO_FLOW_CTRL,
+ PHY_FLOW_CTRL,
+ PHY_TX_ONLY,
+ PHY_RX_ONLY
+};
+
+/**
+ * struct ksz_port - Virtual port data structure
+ * @duplex: Duplex mode setting. 1 for half duplex, 2 for full
+ * duplex, and 0 for auto, which normally results in full
+ * duplex.
+ * @speed: Speed setting. 10 for 10 Mbit, 100 for 100 Mbit, and
+ * 0 for auto, which normally results in 100 Mbit.
+ * @force_link: Force link setting. 0 for auto-negotiation, and 1 for
+ * force.
+ * @flow_ctrl: Flow control setting. PHY_NO_FLOW_CTRL for no flow
+ * control, and PHY_FLOW_CTRL for flow control.
+ * PHY_TX_ONLY and PHY_RX_ONLY are not supported for 100
+ * Mbit PHY.
+ * @first_port: Index of first port this port supports.
+ * @mib_port_cnt: Number of ports with MIB counters.
+ * @port_cnt: Number of ports this port supports.
+ * @counter: Port statistics counter.
+ * @hw: Pointer to hardware structure.
+ * @linked: Pointer to port information linked to this port.
+ */
+struct ksz_port {
+ u8 duplex;
+ u8 speed;
+ u8 force_link;
+ u8 flow_ctrl;
+
+ int first_port;
+ int mib_port_cnt;
+ int port_cnt;
+ u64 counter[OID_COUNTER_LAST];
+
+ struct ksz_hw *hw;
+ struct ksz_port_info *linked;
+};
+
+/**
+ * struct ksz_timer_info - Timer information data structure
+ * @timer: Kernel timer.
+ * @cnt: Running timer counter.
+ * @max: Number of times to run timer; -1 for infinity.
+ * @period: Timer period in jiffies.
+ */
+struct ksz_timer_info {
+ struct timer_list timer;
+ int cnt;
+ int max;
+ int period;
+};
+
+/**
+ * struct ksz_shared_mem - OS dependent shared memory data structure
+ * @dma_addr: Physical DMA address allocated.
+ * @alloc_size: Allocation size.
+ * @phys: Actual physical address used.
+ * @alloc_virt: Virtual address allocated.
+ * @virt: Actual virtual address used.
+ */
+struct ksz_shared_mem {
+ dma_addr_t dma_addr;
+ uint alloc_size;
+ uint phys;
+ u8 *alloc_virt;
+ u8 *virt;
+};
+
+/**
+ * struct ksz_counter_info - OS dependent counter information data structure
+ * @counter: Wait queue to wakeup after counters are read.
+ * @time: Next time in jiffies to read counter.
+ * @read: Indication of whether the counters were read in full.
+ */
+struct ksz_counter_info {
+ wait_queue_head_t counter;
+ unsigned long time;
+ int read;
+};
+
+/**
+ * struct dev_info - Network device information data structure
+ * @dev: Pointer to network device.
+ * @pdev: Pointer to PCI device.
+ * @hw: Hardware structure.
+ * @desc_pool: Physical memory used for descriptor pool.
+ * @hwlock: Spinlock serializing access to the hardware.
+ * @lock: Mutex serializing access to the device.
+ * @dev_rcv: Receive process function used.
+ * @last_skb: Socket buffer allocated for descriptor rx fragments.
+ * @skb_index: Buffer index for receiving fragments.
+ * @skb_len: Buffer length for receiving fragments.
+ * @mib_read: Workqueue to read MIB counters.
+ * @mib_timer_info: Timer to read MIB counters.
+ * @counter: Used for MIB reading.
+ * @mtu: Current MTU used. The default is REGULAR_RX_BUF_SIZE;
+ * the maximum is MAX_RX_BUF_SIZE.
+ * @opened: Counter to keep track of device open.
+ * @rx_tasklet: Receive processing tasklet.
+ * @tx_tasklet: Transmit processing tasklet.
+ * @wol_enable: Wake-on-LAN enable set by ethtool.
+ * @wol_support: Wake-on-LAN support used by ethtool.
+ * @pme_wait: Used for KSZ8841 power management.
+ */
+struct dev_info {
+ struct net_device *dev;
+ struct pci_dev *pdev;
+
+ struct ksz_hw hw;
+ struct ksz_shared_mem desc_pool;
+
+ spinlock_t hwlock;
+ struct mutex lock;
+
+ int (*dev_rcv)(struct dev_info *);
+
+ struct sk_buff *last_skb;
+ int skb_index;
+ int skb_len;
+
+ struct work_struct mib_read;
+ struct ksz_timer_info mib_timer_info;
+ struct ksz_counter_info counter[TOTAL_PORT_NUM];
+
+ int mtu;
+ int opened;
+
+ struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tx_tasklet;
+
+ int wol_enable;
+ int wol_support;
+ unsigned long pme_wait;
+};
+
+/**
+ * struct dev_priv - Network device private data structure
+ * @adapter: Adapter device information.
+ * @port: Port information.
+ * @monitor_time_info: Timer to monitor ports.
+ * @stats: Network statistics.
+ * @proc_sem: Semaphore for proc access.
+ * @id: Device ID.
+ * @mii_if: MII interface information.
+ * @advertising: Temporary variable to store advertised settings.
+ * @msg_enable: The message flags controlling driver output.
+ * @media_state: The connection status of the device.
+ * @multicast: The all multicast state of the device.
+ * @promiscuous: The promiscuous state of the device.
+ */
+struct dev_priv {
+ struct dev_info *adapter;
+ struct ksz_port port;
+ struct ksz_timer_info monitor_timer_info;
+ struct net_device_stats stats;
+
+ struct semaphore proc_sem;
+ int id;
+
+ struct mii_if_info mii_if;
+ u32 advertising;
+
+ u32 msg_enable;
+ int media_state;
+ int multicast;
+ int promiscuous;
+};
+
+#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
+#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
+#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
+#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
+
+#define DRV_NAME "KSZ884X PCI"
+#define DEVICE_NAME "KSZ884x PCI"
+#define DRV_VERSION "1.0.0"
+#define DRV_RELDATE "Feb 8, 2010"
+
+static char version[] __devinitdata =
+ "Micrel " DEVICE_NAME " " DRV_VERSION " (" DRV_RELDATE ")";
+
+static u8 DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x88, 0x42, 0x01 };
+
+/*
+ * Interrupt processing primary routines
+ */
+
+static inline void hw_ack_intr(struct ksz_hw *hw, uint interrupt)
+{
+ writel(interrupt, hw->io + KS884X_INTERRUPTS_STATUS);
+}
+
+static inline void hw_dis_intr(struct ksz_hw *hw)
+{
+ hw->intr_blocked = hw->intr_mask;
+ writel(0, hw->io + KS884X_INTERRUPTS_ENABLE);
+ hw->intr_set = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
+}
+
+static inline void hw_set_intr(struct ksz_hw *hw, uint interrupt)
+{
+ hw->intr_set = interrupt;
+ writel(interrupt, hw->io + KS884X_INTERRUPTS_ENABLE);
+}
+
+static inline void hw_ena_intr(struct ksz_hw *hw)
+{
+ hw->intr_blocked = 0;
+ hw_set_intr(hw, hw->intr_mask);
+}
+
+static inline void hw_dis_intr_bit(struct ksz_hw *hw, uint bit)
+{
+ hw->intr_mask &= ~(bit);
+}
+
+static inline void hw_turn_off_intr(struct ksz_hw *hw, uint interrupt)
+{
+ u32 read_intr;
+
+ read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
+ hw->intr_set = read_intr & ~interrupt;
+ writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
+ hw_dis_intr_bit(hw, interrupt);
+}
+
+/**
+ * hw_turn_on_intr - turn on specified interrupts
+ * @hw: The hardware instance.
+ * @bit: The interrupt bits to be on.
+ *
+ * This routine turns on the specified interrupts in the interrupt mask so that
+ * those interrupts will be enabled.
+ */
+static void hw_turn_on_intr(struct ksz_hw *hw, u32 bit)
+{
+ hw->intr_mask |= bit;
+
+ if (!hw->intr_blocked)
+ hw_set_intr(hw, hw->intr_mask);
+}
+
+static inline void hw_ena_intr_bit(struct ksz_hw *hw, uint interrupt)
+{
+ u32 read_intr;
+
+ read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
+ hw->intr_set = read_intr | interrupt;
+ writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
+}
+
+static inline void hw_read_intr(struct ksz_hw *hw, uint *status)
+{
+ *status = readl(hw->io + KS884X_INTERRUPTS_STATUS);
+ *status = *status & hw->intr_set;
+}
+
+static inline void hw_restore_intr(struct ksz_hw *hw, uint interrupt)
+{
+ if (interrupt)
+ hw_ena_intr(hw);
+}
+
+/**
+ * hw_block_intr - block hardware interrupts
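+ * @hw: The hardware instance.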
+ *
+ * This function blocks all interrupts of the hardware and returns the current
+ * interrupt enable mask so that interrupts can be restored later.
+ *
+ * Return the current interrupt enable mask.
+ */
+static uint hw_block_intr(struct ksz_hw *hw)
+{
+ uint interrupt = 0;
+
+ if (!hw->intr_blocked) {
+ hw_dis_intr(hw);
+ interrupt = hw->intr_blocked;
+ }
+ return interrupt;
+}
+
+/*
+ * Hardware descriptor routines
+ */
+
+static inline void reset_desc(struct ksz_desc *desc, union desc_stat status)
+{
+ status.rx.hw_owned = 0;
+ desc->phw->ctrl.data = cpu_to_le32(status.data);
+}
+
+static inline void release_desc(struct ksz_desc *desc)
+{
+ desc->sw.ctrl.tx.hw_owned = 1;
+ if (desc->sw.buf_size != desc->sw.buf.data) {
+ desc->sw.buf_size = desc->sw.buf.data;
+ desc->phw->buf.data = cpu_to_le32(desc->sw.buf.data);
+ }
+ desc->phw->ctrl.data = cpu_to_le32(desc->sw.ctrl.data);
+}
+
+static void get_rx_pkt(struct ksz_desc_info *info, struct ksz_desc **desc)
+{
+ *desc = &info->ring[info->last];
+ info->last++;
+ info->last &= info->mask;
+ info->avail--;
+ (*desc)->sw.buf.data &= ~KS_DESC_RX_MASK;
+}
+
+static inline void set_rx_buf(struct ksz_desc *desc, u32 addr)
+{
+ desc->phw->addr = cpu_to_le32(addr);
+}
+
+static inline void set_rx_len(struct ksz_desc *desc, u32 len)
+{
+ desc->sw.buf.rx.buf_size = len;
+}
+
+static inline void get_tx_pkt(struct ksz_desc_info *info,
+ struct ksz_desc **desc)
+{
+ *desc = &info->ring[info->next];
+ info->next++;
+ info->next &= info->mask;
+ info->avail--;
+ (*desc)->sw.buf.data &= ~KS_DESC_TX_MASK;
+}
+
+static inline void set_tx_buf(struct ksz_desc *desc, u32 addr)
+{
+ desc->phw->addr = cpu_to_le32(addr);
+}
+
+static inline void set_tx_len(struct ksz_desc *desc, u32 len)
+{
+ desc->sw.buf.tx.buf_size = len;
+}
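Editor's note: the helpers above combine into a simple queueing pattern for one transmit buffer. An illustrative sketch only (it assumes dma_addr/len come from a prior pci_map_single() and that the caller already holds whatever lock protects the TX ring):

        static void tx_queue_one_sketch(struct ksz_desc_info *info,
                u32 dma_addr, u32 len)
        {
                struct ksz_desc *desc;

                get_tx_pkt(info, &desc);        /* claim the next descriptor */
                set_tx_buf(desc, dma_addr);     /* point it at the mapped buffer */
                set_tx_len(desc, len);          /* record the buffer length */
                desc->sw.buf.tx.first_seg = 1;  /* single-fragment packet */
                desc->sw.buf.tx.last_seg = 1;
                release_desc(desc);             /* hand ownership to hardware */
        }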
+
+/* Switch functions */
+
+#define TABLE_READ 0x10
+#define TABLE_SEL_SHIFT 2
+
+#define HW_DELAY(hw, reg) \
+ do { \
+ u16 dummy; \
+ dummy = readw(hw->io + reg); \
+ } while (0)
+
+/**
+ * sw_r_table - read 4 bytes of data from switch table
+ * @hw: The hardware instance.
+ * @table: The table selector.
+ * @addr: The address of the table entry.
+ * @data: Buffer to store the read data.
+ *
+ * This routine reads 4 bytes of data from the table of the switch.
+ * Hardware interrupts are disabled to minimize corruption of read data.
+ */
+static void sw_r_table(struct ksz_hw *hw, int table, u16 addr, u32 *data)
+{
+ u16 ctrl_addr;
+ uint interrupt;
+
+ ctrl_addr = (((table << TABLE_SEL_SHIFT) | TABLE_READ) << 8) | addr;
+
+ interrupt = hw_block_intr(hw);
+
+ writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
+ HW_DELAY(hw, KS884X_IACR_OFFSET);
+ *data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
+
+ hw_restore_intr(hw, interrupt);
+}
+
+/**
+ * sw_w_table_64 - write 8 bytes of data to the switch table
+ * @hw: The hardware instance.
+ * @table: The table selector.
+ * @addr: The address of the table entry.
+ * @data_hi: The high part of data to be written (bit63 ~ bit32).
+ * @data_lo: The low part of data to be written (bit31 ~ bit0).
+ *
+ * This routine writes 8 bytes of data to the table of the switch.
+ * Hardware interrupts are disabled to minimize corruption of written data.
+ */
+static void sw_w_table_64(struct ksz_hw *hw, int table, u16 addr, u32 data_hi,
+ u32 data_lo)
+{
+ u16 ctrl_addr;
+ uint interrupt;
+
+ ctrl_addr = ((table << TABLE_SEL_SHIFT) << 8) | addr;
+
+ interrupt = hw_block_intr(hw);
+
+ writel(data_hi, hw->io + KS884X_ACC_DATA_4_OFFSET);
+ writel(data_lo, hw->io + KS884X_ACC_DATA_0_OFFSET);
+
+ writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
+ HW_DELAY(hw, KS884X_IACR_OFFSET);
+
+ hw_restore_intr(hw, interrupt);
+}
+
+/**
+ * sw_w_sta_mac_table - write to the static MAC table
+ * @hw: The hardware instance.
+ * @addr: The address of the table entry.
+ * @mac_addr: The MAC address.
+ * @ports: The port members.
+ * @override: The flag to override the port receive/transmit settings.
+ * @valid: The flag to indicate entry is valid.
+ * @use_fid: The flag to indicate the FID is valid.
+ * @fid: The FID value.
+ *
+ * This routine writes an entry of the static MAC table of the switch. It
+ * calls sw_w_table_64() to write the data.
+ */
+static void sw_w_sta_mac_table(struct ksz_hw *hw, u16 addr, u8 *mac_addr,
+ u8 ports, int override, int valid, int use_fid, u8 fid)
+{
+ u32 data_hi;
+ u32 data_lo;
+
+ data_lo = ((u32) mac_addr[2] << 24) |
+ ((u32) mac_addr[3] << 16) |
+ ((u32) mac_addr[4] << 8) | mac_addr[5];
+ data_hi = ((u32) mac_addr[0] << 8) | mac_addr[1];
+ data_hi |= (u32) ports << STATIC_MAC_FWD_PORTS_SHIFT;
+
+ if (override)
+ data_hi |= STATIC_MAC_TABLE_OVERRIDE;
+ if (use_fid) {
+ data_hi |= STATIC_MAC_TABLE_USE_FID;
+ data_hi |= (u32) fid << STATIC_MAC_FID_SHIFT;
+ }
+ if (valid)
+ data_hi |= STATIC_MAC_TABLE_VALID;
+
+ sw_w_table_64(hw, TABLE_STATIC_MAC, addr, data_hi, data_lo);
+}
+
+/**
+ * sw_r_vlan_table - read from the VLAN table
+ * @hw: The hardware instance.
+ * @addr: The address of the table entry.
+ * @vid: Buffer to store the VID.
+ * @fid: Buffer to store the FID.
+ * @member: Buffer to store the port membership.
+ *
+ * This function reads an entry of the VLAN table of the switch. It calls
+ * sw_r_table() to get the data.
+ *
+ * Return 0 if the entry is valid; otherwise -1.
+ */
+static int sw_r_vlan_table(struct ksz_hw *hw, u16 addr, u16 *vid, u8 *fid,
+ u8 *member)
+{
+ u32 data;
+
+ sw_r_table(hw, TABLE_VLAN, addr, &data);
+ if (data & VLAN_TABLE_VALID) {
+ *vid = (u16)(data & VLAN_TABLE_VID);
+ *fid = (u8)((data & VLAN_TABLE_FID) >> VLAN_TABLE_FID_SHIFT);
+ *member = (u8)((data & VLAN_TABLE_MEMBERSHIP) >>
+ VLAN_TABLE_MEMBERSHIP_SHIFT);
+ return 0;
+ }
+ return -1;
+}
+
+/**
+ * port_r_mib_cnt - read MIB counter
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @addr: The address of the counter.
+ * @cnt: Buffer to store the counter.
+ *
+ * This routine reads a MIB counter of the port.
+ * Hardware interrupts are disabled to minimize corruption of read data.
+ */
+static void port_r_mib_cnt(struct ksz_hw *hw, int port, u16 addr, u64 *cnt)
+{
+ u32 data;
+ u16 ctrl_addr;
+ uint interrupt;
+ int timeout;
+
+ ctrl_addr = addr + PORT_COUNTER_NUM * port;
+
+ interrupt = hw_block_intr(hw);
+
+ ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ) << 8);
+ writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
+ HW_DELAY(hw, KS884X_IACR_OFFSET);
+
+ for (timeout = 100; timeout > 0; timeout--) {
+ data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
+
+ if (data & MIB_COUNTER_VALID) {
+ if (data & MIB_COUNTER_OVERFLOW)
+ *cnt += MIB_COUNTER_VALUE + 1;
+ *cnt += data & MIB_COUNTER_VALUE;
+ break;
+ }
+ }
+
+ hw_restore_intr(hw, interrupt);
+}
+
+/**
+ * port_r_mib_pkt - read dropped packet counts
+ * @hw: The hardware instance.
+ * @port: The port index.
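+ * @last: Buffer holding the previously read raw values, updated in place.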
+ * @cnt: Buffer to store the receive and transmit dropped packet counts.
+ *
+ * This routine reads the dropped packet counts of the port.
+ * Hardware interrupts are disabled to minimize corruption of read data.
+ */
+static void port_r_mib_pkt(struct ksz_hw *hw, int port, u32 *last, u64 *cnt)
+{
+ u32 cur;
+ u32 data;
+ u16 ctrl_addr;
+ uint interrupt;
+ int index;
+
+ index = KS_MIB_PACKET_DROPPED_RX_0 + port;
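+ /*
+  * Read the RX dropped count first, then the TX dropped count for the
+  * same port.  A value smaller than the last reading means the hardware
+  * counter wrapped around.
+  */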
+ do {
+ interrupt = hw_block_intr(hw);
+
+ ctrl_addr = (u16) index;
+ ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ)
+ << 8);
+ writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
+ HW_DELAY(hw, KS884X_IACR_OFFSET);
+ data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
+
+ hw_restore_intr(hw, interrupt);
+
+ data &= MIB_PACKET_DROPPED;
+ cur = *last;
+ if (data != cur) {
+ *last = data;
+ if (data < cur)
+ data += MIB_PACKET_DROPPED + 1;
+ data -= cur;
+ *cnt += data;
+ }
+ ++last;
+ ++cnt;
+ index -= KS_MIB_PACKET_DROPPED_TX -
+ KS_MIB_PACKET_DROPPED_TX_0 + 1;
+ } while (index >= KS_MIB_PACKET_DROPPED_TX_0 + port);
+}
+
+/**
+ * port_r_cnt - read MIB counters periodically
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine is used to read the counters of the port periodically to avoid
+ * counter overflow. The hardware should be acquired before calling this
+ * routine.
+ *
+ * Return non-zero when not all counters are read.
+ */
+static int port_r_cnt(struct ksz_hw *hw, int port)
+{
+ struct ksz_port_mib *mib = &hw->port_mib[port];
+
+ if (mib->mib_start < PORT_COUNTER_NUM)
+ while (mib->cnt_ptr < PORT_COUNTER_NUM) {
+ port_r_mib_cnt(hw, port, mib->cnt_ptr,
+ &mib->counter[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ }
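+ /* The dropped packet counts are stored after the regular MIB counters. */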
+ if (hw->mib_cnt > PORT_COUNTER_NUM)
+ port_r_mib_pkt(hw, port, mib->dropped,
+ &mib->counter[PORT_COUNTER_NUM]);
+ mib->cnt_ptr = 0;
+ return 0;
+}
+
+/**
+ * port_init_cnt - initialize MIB counter values
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine is used to initialize all counters to zero if the hardware
+ * cannot do it after reset.
+ */
+static void port_init_cnt(struct ksz_hw *hw, int port)
+{
+ struct ksz_port_mib *mib = &hw->port_mib[port];
+
+ mib->cnt_ptr = 0;
+ if (mib->mib_start < PORT_COUNTER_NUM)
+ do {
+ port_r_mib_cnt(hw, port, mib->cnt_ptr,
+ &mib->counter[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ } while (mib->cnt_ptr < PORT_COUNTER_NUM);
+ if (hw->mib_cnt > PORT_COUNTER_NUM)
+ port_r_mib_pkt(hw, port, mib->dropped,
+ &mib->counter[PORT_COUNTER_NUM]);
+ memset((void *) mib->counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
+ mib->cnt_ptr = 0;
+}
+
+/*
+ * Port functions
+ */
+
+/**
+ * port_chk - check port register bits
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @bits: The data bits to check.
+ *
+ * This function checks whether the specified bits of the port register are set
+ * or not.
+ *
+ * Return 0 if the bits are not set.
+ */
+static int port_chk(struct ksz_hw *hw, int port, int offset, u16 bits)
+{
+ u32 addr;
+ u16 data;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ data = readw(hw->io + addr);
+ return (data & bits) == bits;
+}
+
+/**
+ * port_cfg - set port register bits
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @bits: The data bits to set.
+ * @set: The flag indicating whether the bits are to be set or not.
+ *
+ * This routine sets or resets the specified bits of the port register.
+ */
+static void port_cfg(struct ksz_hw *hw, int port, int offset, u16 bits,
+ int set)
+{
+ u32 addr;
+ u16 data;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ data = readw(hw->io + addr);
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ writew(data, hw->io + addr);
+}
+
+/**
+ * port_chk_shift - check port bit
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @addr: The address of the register.
+ * @shift: Number of bits to shift.
+ *
+ * This function checks whether the specified port is set in the register or
+ * not.
+ *
+ * Return 0 if the port is not set.
+ */
+static int port_chk_shift(struct ksz_hw *hw, int port, u32 addr, int shift)
+{
+ u16 data;
+ u16 bit = 1 << port;
+
+ data = readw(hw->io + addr);
+ data >>= shift;
+ return (data & bit) == bit;
+}
+
+/**
+ * port_cfg_shift - set port bit
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @addr: The address of the register.
+ * @shift: Number of bits to shift.
+ * @set: The flag indicating whether the port is to be set or not.
+ *
+ * This routine sets or resets the specified port in the register.
+ */
+static void port_cfg_shift(struct ksz_hw *hw, int port, u32 addr, int shift,
+ int set)
+{
+ u16 data;
+ u16 bits = 1 << port;
+
+ data = readw(hw->io + addr);
+ bits <<= shift;
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ writew(data, hw->io + addr);
+}
+
+/**
+ * port_r8 - read byte from port register
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @data: Buffer to store the data.
+ *
+ * This routine reads a byte from the port register.
+ */
+static void port_r8(struct ksz_hw *hw, int port, int offset, u8 *data)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ *data = readb(hw->io + addr);
+}
+
+/**
+ * port_r16 - read word from port register.
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @data: Buffer to store the data.
+ *
+ * This routine reads a word from the port register.
+ */
+static void port_r16(struct ksz_hw *hw, int port, int offset, u16 *data)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ *data = readw(hw->io + addr);
+}
+
+/**
+ * port_w16 - write word to port register.
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @data: Data to write.
+ *
+ * This routine writes a word to the port register.
+ */
+static void port_w16(struct ksz_hw *hw, int port, int offset, u16 data)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ writew(data, hw->io + addr);
+}
+
+/**
+ * sw_chk - check switch register bits
+ * @hw: The hardware instance.
+ * @addr: The address of the switch register.
+ * @bits: The data bits to check.
+ *
+ * This function checks whether the specified bits of the switch register are
+ * set or not.
+ *
+ * Return 0 if the bits are not set.
+ */
+static int sw_chk(struct ksz_hw *hw, u32 addr, u16 bits)
+{
+ u16 data;
+
+ data = readw(hw->io + addr);
+ return (data & bits) == bits;
+}
+
+/**
+ * sw_cfg - set switch register bits
+ * @hw: The hardware instance.
+ * @addr: The address of the switch register.
+ * @bits: The data bits to set.
+ * @set: The flag indicating whether the bits are to be set or not.
+ *
+ * This function sets or resets the specified bits of the switch register.
+ */
+static void sw_cfg(struct ksz_hw *hw, u32 addr, u16 bits, int set)
+{
+ u16 data;
+
+ data = readw(hw->io + addr);
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ writew(data, hw->io + addr);
+}
+
+/* Bandwidth */
+
+static inline void port_cfg_broad_storm(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM, set);
+}
+
+static inline int port_chk_broad_storm(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM);
+}
+
+/* The driver sets the switch broadcast storm protection at a 10% rate. */
+#define BROADCAST_STORM_PROTECTION_RATE 10
+
+/* 148,800 frames * 67 ms / 100 */
+#define BROADCAST_STORM_VALUE 9969
+
+/**
+ * sw_cfg_broad_storm - configure broadcast storm threshold
+ * @hw: The hardware instance.
+ * @percent: Broadcast storm threshold in percent of transmit rate.
+ *
+ * This routine configures the broadcast storm threshold of the switch.
+ */
+static void sw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
+{
+ u16 data;
+ u32 value = ((u32) BROADCAST_STORM_VALUE * (u32) percent / 100);
+
+ if (value > BROADCAST_STORM_RATE)
+ value = BROADCAST_STORM_RATE;
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+ data &= ~(BROADCAST_STORM_RATE_LO | BROADCAST_STORM_RATE_HI);
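+ /* The rate value is stored byte-swapped across the low and high fields. */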
+ data |= ((value & 0x00FF) << 8) | ((value & 0xFF00) >> 8);
+ writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+}
+
+/**
+ * sw_get_broad_storm - get broadcast storm threshold
+ * @hw: The hardware instance.
+ * @percent: Buffer to store the broadcast storm threshold percentage.
+ *
+ * This routine retrieves the broadcast storm threshold of the switch.
+ */
+static void sw_get_broad_storm(struct ksz_hw *hw, u8 *percent)
+{
+ int num;
+ u16 data;
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+ num = (data & BROADCAST_STORM_RATE_HI);
+ num <<= 8;
+ num |= (data & BROADCAST_STORM_RATE_LO) >> 8;
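+ /* Convert back to a percentage, rounding to the nearest whole percent. */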
+ num = (num * 100 + BROADCAST_STORM_VALUE / 2) / BROADCAST_STORM_VALUE;
+ *percent = (u8) num;
+}
+
+/**
+ * sw_dis_broad_storm - disable broadcast storm
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the broadcast storm limit function of the switch.
+ */
+static void sw_dis_broad_storm(struct ksz_hw *hw, int port)
+{
+ port_cfg_broad_storm(hw, port, 0);
+}
+
+/**
+ * sw_ena_broad_storm - enable broadcast storm
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine enables the broadcast storm limit function of the switch.
+ */
+static void sw_ena_broad_storm(struct ksz_hw *hw, int port)
+{
+ sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
+ port_cfg_broad_storm(hw, port, 1);
+}
+
+/**
+ * sw_init_broad_storm - initialize broadcast storm
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the broadcast storm limit function of the switch.
+ */
+static void sw_init_broad_storm(struct ksz_hw *hw)
+{
+ int port;
+
+ hw->ksz_switch->broad_per = 1;
+ sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
+ for (port = 0; port < TOTAL_PORT_NUM; port++)
+ sw_dis_broad_storm(hw, port);
+ sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, MULTICAST_STORM_DISABLE, 1);
+}
+
+/**
+ * hw_cfg_broad_storm - configure broadcast storm
+ * @hw: The hardware instance.
+ * @percent: Broadcast storm threshold in percent of transmit rate.
+ *
+ * This routine configures the broadcast storm threshold of the switch.
+ * It is called by user functions. The hardware should be acquired first.
+ */
+static void hw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
+{
+ if (percent > 100)
+ percent = 100;
+
+ sw_cfg_broad_storm(hw, percent);
+ sw_get_broad_storm(hw, &percent);
+ hw->ksz_switch->broad_per = percent;
+}
+
+/**
+ * sw_dis_prio_rate - disable switch priority rate
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the priority rate function of the switch.
+ */
+static void sw_dis_prio_rate(struct ksz_hw *hw, int port)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += KS8842_PORT_IN_RATE_OFFSET;
+ writel(0, hw->io + addr);
+}
+
+/**
+ * sw_init_prio_rate - initialize switch priority rate
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the priority rate function of the switch.
+ */
+static void sw_init_prio_rate(struct ksz_hw *hw)
+{
+ int port;
+ int prio;
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ for (port = 0; port < TOTAL_PORT_NUM; port++) {
+ for (prio = 0; prio < PRIO_QUEUES; prio++) {
+ sw->port_cfg[port].rx_rate[prio] =
+ sw->port_cfg[port].tx_rate[prio] = 0;
+ }
+ sw_dis_prio_rate(hw, port);
+ }
+}
+
+/* Communication */
+
+static inline void port_cfg_back_pressure(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE, set);
+}
+
+static inline void port_cfg_force_flow_ctrl(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL, set);
+}
+
+static inline int port_chk_back_pressure(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE);
+}
+
+static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL);
+}
+
+/* Spanning Tree */
+
+static inline void port_cfg_dis_learn(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_LEARN_DISABLE, set);
+}
+
+static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_RX_ENABLE, set);
+}
+
+static inline void port_cfg_tx(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_TX_ENABLE, set);
+}
+
+static inline void sw_cfg_fast_aging(struct ksz_hw *hw, int set)
+{
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET, SWITCH_FAST_AGING, set);
+}
+
+static inline void sw_flush_dyn_mac_table(struct ksz_hw *hw)
+{
+ if (!(hw->overrides & FAST_AGING)) {
+ sw_cfg_fast_aging(hw, 1);
+ mdelay(1);
+ sw_cfg_fast_aging(hw, 0);
+ }
+}
+
+/* VLAN */
+
+static inline void port_cfg_ins_tag(struct ksz_hw *hw, int p, int insert)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG, insert);
+}
+
+static inline void port_cfg_rmv_tag(struct ksz_hw *hw, int p, int remove)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG, remove);
+}
+
+static inline int port_chk_ins_tag(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG);
+}
+
+static inline int port_chk_rmv_tag(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG);
+}
+
+static inline void port_cfg_dis_non_vid(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID, set);
+}
+
+static inline void port_cfg_in_filter(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER, set);
+}
+
+static inline int port_chk_dis_non_vid(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID);
+}
+
+static inline int port_chk_in_filter(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER);
+}
+
+/* Mirroring */
+
+static inline void port_cfg_mirror_sniffer(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_SNIFFER, set);
+}
+
+static inline void port_cfg_mirror_rx(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_RX, set);
+}
+
+static inline void port_cfg_mirror_tx(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_TX, set);
+}
+
+static inline void sw_cfg_mirror_rx_tx(struct ksz_hw *hw, int set)
+{
+ sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, SWITCH_MIRROR_RX_TX, set);
+}
+
+static void sw_init_mirror(struct ksz_hw *hw)
+{
+ int port;
+
+ for (port = 0; port < TOTAL_PORT_NUM; port++) {
+ port_cfg_mirror_sniffer(hw, port, 0);
+ port_cfg_mirror_rx(hw, port, 0);
+ port_cfg_mirror_tx(hw, port, 0);
+ }
+ sw_cfg_mirror_rx_tx(hw, 0);
+}
+
+static inline void sw_cfg_unk_def_deliver(struct ksz_hw *hw, int set)
+{
+ sw_cfg(hw, KS8842_SWITCH_CTRL_7_OFFSET,
+ SWITCH_UNK_DEF_PORT_ENABLE, set);
+}
+
+static inline int sw_cfg_chk_unk_def_deliver(struct ksz_hw *hw)
+{
+ return sw_chk(hw, KS8842_SWITCH_CTRL_7_OFFSET,
+ SWITCH_UNK_DEF_PORT_ENABLE);
+}
+
+static inline void sw_cfg_unk_def_port(struct ksz_hw *hw, int port, int set)
+{
+ port_cfg_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0, set);
+}
+
+static inline int sw_chk_unk_def_port(struct ksz_hw *hw, int port)
+{
+ return port_chk_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0);
+}
+
+/* Priority */
+
+static inline void port_cfg_diffserv(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE, set);
+}
+
+static inline void port_cfg_802_1p(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE, set);
+}
+
+static inline void port_cfg_replace_vid(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING, set);
+}
+
+static inline void port_cfg_prio(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE, set);
+}
+
+static inline int port_chk_diffserv(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE);
+}
+
+static inline int port_chk_802_1p(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE);
+}
+
+static inline int port_chk_replace_vid(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING);
+}
+
+static inline int port_chk_prio(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE);
+}
+
+/**
+ * sw_dis_diffserv - disable switch DiffServ priority
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the DiffServ priority function of the switch.
+ */
+static void sw_dis_diffserv(struct ksz_hw *hw, int port)
+{
+ port_cfg_diffserv(hw, port, 0);
+}
+
+/**
+ * sw_dis_802_1p - disable switch 802.1p priority
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the 802.1p priority function of the switch.
+ */
+static void sw_dis_802_1p(struct ksz_hw *hw, int port)
+{
+ port_cfg_802_1p(hw, port, 0);
+}
+
+/**
+ * sw_cfg_replace_null_vid - configure switch null VID replacement
+ * @hw: The hardware instance.
+ * @set: The flag to disable or enable.
+ *
+ * This routine enables or disables the null VID replacement function of the
+ * switch.
+ */
+static void sw_cfg_replace_null_vid(struct ksz_hw *hw, int set)
+{
+ sw_cfg(hw, KS8842_SWITCH_CTRL_3_OFFSET, SWITCH_REPLACE_NULL_VID, set);
+}
+
+/**
+ * sw_cfg_replace_vid - enable switch 802.1p priority re-mapping
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @set: The flag to disable or enable.
+ *
+ * This routine enables the 802.1p priority re-mapping function of the switch.
+ * That allows the 802.1p priority field to be replaced with the port's
+ * default tag priority value when the ingress packet's 802.1p priority is
+ * higher than the port's default tag priority.
+ */
+static void sw_cfg_replace_vid(struct ksz_hw *hw, int port, int set)
+{
+ port_cfg_replace_vid(hw, port, set);
+}
+
+/**
+ * sw_cfg_port_based - configure switch port based priority
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @prio: The priority to set.
+ *
+ * This routine configures the port based priority of the switch.
+ */
+static void sw_cfg_port_based(struct ksz_hw *hw, int port, u8 prio)
+{
+ u16 data;
+
+ if (prio > PORT_BASED_PRIORITY_BASE)
+ prio = PORT_BASED_PRIORITY_BASE;
+
+ hw->ksz_switch->port_cfg[port].port_prio = prio;
+
+ port_r16(hw, port, KS8842_PORT_CTRL_1_OFFSET, &data);
+ data &= ~PORT_BASED_PRIORITY_MASK;
+ data |= prio << PORT_BASED_PRIORITY_SHIFT;
+ port_w16(hw, port, KS8842_PORT_CTRL_1_OFFSET, data);
+}
+
+/**
+ * sw_dis_multi_queue - disable transmit multiple queues
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the transmit multiple queue selection of the switch
+ * port. Only a single transmit queue is used on the port.
+ */
+static void sw_dis_multi_queue(struct ksz_hw *hw, int port)
+{
+ port_cfg_prio(hw, port, 0);
+}
+
+/**
+ * sw_init_prio - initialize switch priority
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the switch QoS priority functions.
+ */
+static void sw_init_prio(struct ksz_hw *hw)
+{
+ int port;
+ int tos;
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ /*
+ * Init all the 802.1p tag priority value to be assigned to different
+ * priority queue.
+ */
+ sw->p_802_1p[0] = 0;
+ sw->p_802_1p[1] = 0;
+ sw->p_802_1p[2] = 1;
+ sw->p_802_1p[3] = 1;
+ sw->p_802_1p[4] = 2;
+ sw->p_802_1p[5] = 2;
+ sw->p_802_1p[6] = 3;
+ sw->p_802_1p[7] = 3;
+
+ /*
+ * Init all the DiffServ priority value to be assigned to priority
+ * queue 0.
+ */
+ for (tos = 0; tos < DIFFSERV_ENTRIES; tos++)
+ sw->diffserv[tos] = 0;
+
+ /* All QoS functions disabled. */
+ for (port = 0; port < TOTAL_PORT_NUM; port++) {
+ sw_dis_multi_queue(hw, port);
+ sw_dis_diffserv(hw, port);
+ sw_dis_802_1p(hw, port);
+ sw_cfg_replace_vid(hw, port, 0);
+
+ sw->port_cfg[port].port_prio = 0;
+ sw_cfg_port_based(hw, port, sw->port_cfg[port].port_prio);
+ }
+ sw_cfg_replace_null_vid(hw, 0);
+}
+
+/**
+ * port_get_def_vid - get port default VID.
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @vid: Buffer to store the VID.
+ *
+ * This routine retrieves the default VID of the port.
+ */
+static void port_get_def_vid(struct ksz_hw *hw, int port, u16 *vid)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += KS8842_PORT_CTRL_VID_OFFSET;
+ *vid = readw(hw->io + addr);
+}
+
+/**
+ * sw_init_vlan - initialize switch VLAN
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the VLAN function of the switch.
+ */
+static void sw_init_vlan(struct ksz_hw *hw)
+{
+ int port;
+ int entry;
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ /* Read 16 VLAN entries from device's VLAN table. */
+ for (entry = 0; entry < VLAN_TABLE_ENTRIES; entry++) {
+ sw_r_vlan_table(hw, entry,
+ &sw->vlan_table[entry].vid,
+ &sw->vlan_table[entry].fid,
+ &sw->vlan_table[entry].member);
+ }
+
+ for (port = 0; port < TOTAL_PORT_NUM; port++) {
+ port_get_def_vid(hw, port, &sw->port_cfg[port].vid);
+ sw->port_cfg[port].member = PORT_MASK;
+ }
+}
+
+/**
+ * sw_cfg_port_base_vlan - configure port-based VLAN membership
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @member: The port-based VLAN membership.
+ *
+ * This routine configures the port-based VLAN membership of the port.
+ */
+static void sw_cfg_port_base_vlan(struct ksz_hw *hw, int port, u8 member)
+{
+ u32 addr;
+ u8 data;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += KS8842_PORT_CTRL_2_OFFSET;
+
+ data = readb(hw->io + addr);
+ data &= ~PORT_VLAN_MEMBERSHIP;
+ data |= (member & PORT_MASK);
+ writeb(data, hw->io + addr);
+
+ hw->ksz_switch->port_cfg[port].member = member;
+}
+
+/**
+ * sw_get_addr - get the switch MAC address.
+ * @hw: The hardware instance.
+ * @mac_addr: Buffer to store the MAC address.
+ *
+ * This function retrieves the MAC address of the switch.
+ */
+static inline void sw_get_addr(struct ksz_hw *hw, u8 *mac_addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i += 2) {
+ mac_addr[i] = readb(hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
+ mac_addr[1 + i] = readb(hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
+ }
+}
+
+/**
+ * sw_set_addr - configure switch MAC address
+ * @hw: The hardware instance.
+ * @mac_addr: The MAC address.
+ *
+ * This function configures the MAC address of the switch.
+ */
+static void sw_set_addr(struct ksz_hw *hw, u8 *mac_addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i += 2) {
+ writeb(mac_addr[i], hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
+ writeb(mac_addr[1 + i], hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
+ }
+}
+
+/**
+ * sw_set_global_ctrl - set switch global control
+ * @hw: The hardware instance.
+ *
+ * This routine sets the global control of the switch function.
+ */
+static void sw_set_global_ctrl(struct ksz_hw *hw)
+{
+ u16 data;
+
+ /* Enable switch MII flow control. */
+ data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+ data |= SWITCH_FLOW_CTRL;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
+
+ /* Enable aggressive back off algorithm in half duplex mode. */
+ data |= SWITCH_AGGR_BACKOFF;
+
+ /* Enable automatic fast aging when a link change is detected. */
+ data |= SWITCH_AGING_ENABLE;
+ data |= SWITCH_LINK_AUTO_AGING;
+
+ if (hw->overrides & FAST_AGING)
+ data |= SWITCH_FAST_AGING;
+ else
+ data &= ~SWITCH_FAST_AGING;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
+
+ /* Enable no excessive collision drop. */
+ data |= NO_EXC_COLLISION_DROP;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
+}
+
+enum {
+ STP_STATE_DISABLED = 0,
+ STP_STATE_LISTENING,
+ STP_STATE_LEARNING,
+ STP_STATE_FORWARDING,
+ STP_STATE_BLOCKED,
+ STP_STATE_SIMPLE
+};
+
+/**
+ * port_set_stp_state - configure port spanning tree state
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @state: The spanning tree state.
+ *
+ * This routine configures the spanning tree state of the port.
+ */
+static void port_set_stp_state(struct ksz_hw *hw, int port, int state)
+{
+ u16 data;
+
+ port_r16(hw, port, KS8842_PORT_CTRL_2_OFFSET, &data);
+ switch (state) {
+ case STP_STATE_DISABLED:
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_LISTENING:
+/*
+ * No need to turn on transmit because of port direct mode.
+ * Turning on receive is required if static MAC table is not setup.
+ */
+ data &= ~PORT_TX_ENABLE;
+ data |= PORT_RX_ENABLE;
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_LEARNING:
+ data &= ~PORT_TX_ENABLE;
+ data |= PORT_RX_ENABLE;
+ data &= ~PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_FORWARDING:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ data &= ~PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_BLOCKED:
+/*
+ * Need to setup static MAC table with override to keep receiving BPDU
+ * messages. See sw_init_stp routine.
+ */
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_SIMPLE:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ data |= PORT_LEARN_DISABLE;
+ break;
+ }
+ port_w16(hw, port, KS8842_PORT_CTRL_2_OFFSET, data);
+ hw->ksz_switch->port_cfg[port].stp_state = state;
+}
+
+#define STP_ENTRY 0
+#define BROADCAST_ENTRY 1
+#define BRIDGE_ADDR_ENTRY 2
+#define IPV6_ADDR_ENTRY 3
+
+/**
+ * sw_clr_sta_mac_table - clear static MAC table
+ * @hw: The hardware instance.
+ *
+ * This routine clears the static MAC table.
+ */
+static void sw_clr_sta_mac_table(struct ksz_hw *hw)
+{
+ struct ksz_mac_table *entry;
+ int i;
+
+ for (i = 0; i < STATIC_MAC_TABLE_ENTRIES; i++) {
+ entry = &hw->ksz_switch->mac_table[i];
+ sw_w_sta_mac_table(hw, i,
+ entry->mac_addr, entry->ports,
+ entry->override, 0,
+ entry->use_fid, entry->fid);
+ }
+}
+
+/**
+ * sw_init_stp - initialize switch spanning tree support
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the spanning tree support of the switch.
+ */
+static void sw_init_stp(struct ksz_hw *hw)
+{
+ struct ksz_mac_table *entry;
+
+ entry = &hw->ksz_switch->mac_table[STP_ENTRY];
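+ /*
+  * Forward BPDUs (01:80:C2:00:00:00) to the host port with override so
+  * spanning tree frames are still received on blocked ports.
+  */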
+ entry->mac_addr[0] = 0x01;
+ entry->mac_addr[1] = 0x80;
+ entry->mac_addr[2] = 0xC2;
+ entry->mac_addr[3] = 0x00;
+ entry->mac_addr[4] = 0x00;
+ entry->mac_addr[5] = 0x00;
+ entry->ports = HOST_MASK;
+ entry->override = 1;
+ entry->valid = 1;
+ sw_w_sta_mac_table(hw, STP_ENTRY,
+ entry->mac_addr, entry->ports,
+ entry->override, entry->valid,
+ entry->use_fid, entry->fid);
+}
+
+/**
+ * sw_block_addr - block certain packets from the host port
+ * @hw: The hardware instance.
+ *
+ * This routine blocks certain packets from reaching the host port.
+ */
+static void sw_block_addr(struct ksz_hw *hw)
+{
+ struct ksz_mac_table *entry;
+ int i;
+
+ for (i = BROADCAST_ENTRY; i <= IPV6_ADDR_ENTRY; i++) {
+ entry = &hw->ksz_switch->mac_table[i];
+ entry->valid = 0;
+ sw_w_sta_mac_table(hw, i,
+ entry->mac_addr, entry->ports,
+ entry->override, entry->valid,
+ entry->use_fid, entry->fid);
+ }
+}
+
+#define PHY_LINK_SUPPORT \
+ (PHY_AUTO_NEG_ASYM_PAUSE | \
+ PHY_AUTO_NEG_SYM_PAUSE | \
+ PHY_AUTO_NEG_100BT4 | \
+ PHY_AUTO_NEG_100BTX_FD | \
+ PHY_AUTO_NEG_100BTX | \
+ PHY_AUTO_NEG_10BT_FD | \
+ PHY_AUTO_NEG_10BT)
+
+static inline void hw_r_phy_ctrl(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_w_phy_ctrl(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_r_phy_link_stat(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_STATUS_OFFSET);
+}
+
+static inline void hw_r_phy_auto_neg(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
+}
+
+static inline void hw_w_phy_auto_neg(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
+}
+
+static inline void hw_r_phy_rem_cap(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_REMOTE_CAP_OFFSET);
+}
+
+static inline void hw_r_phy_crossover(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_w_phy_crossover(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_r_phy_polarity(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_w_phy_polarity(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_r_phy_link_md(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
+}
+
+static inline void hw_w_phy_link_md(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
+}
+
+/**
+ * hw_r_phy - read data from PHY register
+ * @hw: The hardware instance.
+ * @port: Port to read.
+ * @reg: PHY register to read.
+ * @val: Buffer to store the read data.
+ *
+ * This routine reads data from the PHY register.
+ */
+static void hw_r_phy(struct ksz_hw *hw, int port, u16 reg, u16 *val)
+{
+ int phy;
+
+ phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
+ *val = readw(hw->io + phy);
+}
+
+/**
+ * hw_w_phy - write data to PHY register
+ * @hw: The hardware instance.
+ * @port: Port to write.
+ * @reg: PHY register to write.
+ * @val: Word data to write.
+ *
+ * This routine writes data to the PHY register.
+ */
+static void hw_w_phy(struct ksz_hw *hw, int port, u16 reg, u16 val)
+{
+ int phy;
+
+ phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
+ writew(val, hw->io + phy);
+}
+
+/*
+ * EEPROM access functions
+ */
+
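+/* AT93C46 opcodes and extended commands for the bit-banged EEPROM interface. */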
+#define AT93C_CODE 0
+#define AT93C_WR_OFF 0x00
+#define AT93C_WR_ALL 0x10
+#define AT93C_ER_ALL 0x20
+#define AT93C_WR_ON 0x30
+
+#define AT93C_WRITE 1
+#define AT93C_READ 2
+#define AT93C_ERASE 3
+
+#define EEPROM_DELAY 4
+
+static inline void drop_gpio(struct ksz_hw *hw, u8 gpio)
+{
+ u16 data;
+
+ data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
+ data &= ~gpio;
+ writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
+}
+
+static inline void raise_gpio(struct ksz_hw *hw, u8 gpio)
+{
+ u16 data;
+
+ data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
+ data |= gpio;
+ writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
+}
+
+static inline u8 state_gpio(struct ksz_hw *hw, u8 gpio)
+{
+ u16 data;
+
+ data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
+ return (u8)(data & gpio);
+}
+
+static void eeprom_clk(struct ksz_hw *hw)
+{
+ raise_gpio(hw, EEPROM_SERIAL_CLOCK);
+ udelay(EEPROM_DELAY);
+ drop_gpio(hw, EEPROM_SERIAL_CLOCK);
+ udelay(EEPROM_DELAY);
+}
+
+static u16 spi_r(struct ksz_hw *hw)
+{
+ int i;
+ u16 temp = 0;
+
+ for (i = 15; i >= 0; i--) {
+ raise_gpio(hw, EEPROM_SERIAL_CLOCK);
+ udelay(EEPROM_DELAY);
+
+ temp |= (state_gpio(hw, EEPROM_DATA_IN)) ? 1 << i : 0;
+
+ drop_gpio(hw, EEPROM_SERIAL_CLOCK);
+ udelay(EEPROM_DELAY);
+ }
+ return temp;
+}
+
+static void spi_w(struct ksz_hw *hw, u16 data)
+{
+ int i;
+
+ for (i = 15; i >= 0; i--) {
+ (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
+ drop_gpio(hw, EEPROM_DATA_OUT);
+ eeprom_clk(hw);
+ }
+}
+
+static void spi_reg(struct ksz_hw *hw, u8 data, u8 reg)
+{
+ int i;
+
+ /* Initial start bit */
+ raise_gpio(hw, EEPROM_DATA_OUT);
+ eeprom_clk(hw);
+
+ /* AT93C operation */
+ for (i = 1; i >= 0; i--) {
+ (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
+ drop_gpio(hw, EEPROM_DATA_OUT);
+ eeprom_clk(hw);
+ }
+
+ /* Address location */
+ for (i = 5; i >= 0; i--) {
+ (reg & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
+ drop_gpio(hw, EEPROM_DATA_OUT);
+ eeprom_clk(hw);
+ }
+}
+
+#define EEPROM_DATA_RESERVED 0
+#define EEPROM_DATA_MAC_ADDR_0 1
+#define EEPROM_DATA_MAC_ADDR_1 2
+#define EEPROM_DATA_MAC_ADDR_2 3
+#define EEPROM_DATA_SUBSYS_ID 4
+#define EEPROM_DATA_SUBSYS_VEN_ID 5
+#define EEPROM_DATA_PM_CAP 6
+
+/* User defined EEPROM data */
+#define EEPROM_DATA_OTHER_MAC_ADDR 9
+
+/**
+ * eeprom_read - read from AT93C46 EEPROM
+ * @hw: The hardware instance.
+ * @reg: The register offset.
+ *
+ * This function reads a word from the AT93C46 EEPROM.
+ *
+ * Return the data value.
+ */
+static u16 eeprom_read(struct ksz_hw *hw, u8 reg)
+{
+ u16 data;
+
+ raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
+
+ spi_reg(hw, AT93C_READ, reg);
+ data = spi_r(hw);
+
+ drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
+
+ return data;
+}
+
+/**
+ * eeprom_write - write to AT93C46 EEPROM
+ * @hw: The hardware instance.
+ * @reg: The register offset.
+ * @data: The data value.
+ *
+ * This procedure writes a word to the AT93C46 EEPROM.
+ */
+static void eeprom_write(struct ksz_hw *hw, u8 reg, u16 data)
+{
+ int timeout;
+
+ raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
+
+ /* Enable write. */
+ spi_reg(hw, AT93C_CODE, AT93C_WR_ON);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Erase the register. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ spi_reg(hw, AT93C_ERASE, reg);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Check operation complete. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ timeout = 8;
+ mdelay(2);
+ do {
+ mdelay(1);
+ } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Write the register. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ spi_reg(hw, AT93C_WRITE, reg);
+ spi_w(hw, data);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Check operation complete. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ timeout = 8;
+ mdelay(2);
+ do {
+ mdelay(1);
+ } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Disable write. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ spi_reg(hw, AT93C_CODE, AT93C_WR_OFF);
+
+ drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
+}
+
+/*
+ * Link detection routines
+ */
+
+static u16 advertised_flow_ctrl(struct ksz_port *port, u16 ctrl)
+{
+ ctrl &= ~PORT_AUTO_NEG_SYM_PAUSE;
+ switch (port->flow_ctrl) {
+ case PHY_FLOW_CTRL:
+ ctrl |= PORT_AUTO_NEG_SYM_PAUSE;
+ break;
+ /* Not supported. */
+ case PHY_TX_ONLY:
+ case PHY_RX_ONLY:
+ default:
+ break;
+ }
+ return ctrl;
+}
+
+static void set_flow_ctrl(struct ksz_hw *hw, int rx, int tx)
+{
+ u32 rx_cfg;
+ u32 tx_cfg;
+
+ rx_cfg = hw->rx_cfg;
+ tx_cfg = hw->tx_cfg;
+ if (rx)
+ hw->rx_cfg |= DMA_RX_FLOW_ENABLE;
+ else
+ hw->rx_cfg &= ~DMA_RX_FLOW_ENABLE;
+ if (tx)
+ hw->tx_cfg |= DMA_TX_FLOW_ENABLE;
+ else
+ hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
+ if (hw->enabled) {
+ if (rx_cfg != hw->rx_cfg)
+ writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
+ if (tx_cfg != hw->tx_cfg)
+ writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
+ }
+}
+
+static void determine_flow_ctrl(struct ksz_hw *hw, struct ksz_port *port,
+ u16 local, u16 remote)
+{
+ int rx;
+ int tx;
+
+ if (hw->overrides & PAUSE_FLOW_CTRL)
+ return;
+
+ rx = tx = 0;
+ if (port->force_link)
+ rx = tx = 1;
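+ /* Resolve RX/TX pause from the local and link partner advertisements. */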
+ if (remote & PHY_AUTO_NEG_SYM_PAUSE) {
+ if (local & PHY_AUTO_NEG_SYM_PAUSE) {
+ rx = tx = 1;
+ } else if ((remote & PHY_AUTO_NEG_ASYM_PAUSE) &&
+ (local & PHY_AUTO_NEG_PAUSE) ==
+ PHY_AUTO_NEG_ASYM_PAUSE) {
+ tx = 1;
+ }
+ } else if (remote & PHY_AUTO_NEG_ASYM_PAUSE) {
+ if ((local & PHY_AUTO_NEG_PAUSE) == PHY_AUTO_NEG_PAUSE)
+ rx = 1;
+ }
+ if (!hw->ksz_switch)
+ set_flow_ctrl(hw, rx, tx);
+}
+
+static inline void port_cfg_change(struct ksz_hw *hw, struct ksz_port *port,
+ struct ksz_port_info *info, u16 link_status)
+{
+ if ((hw->features & HALF_DUPLEX_SIGNAL_BUG) &&
+ !(hw->overrides & PAUSE_FLOW_CTRL)) {
+ u32 cfg = hw->tx_cfg;
+
+ /* Disable flow control in the half duplex mode. */
+ if (1 == info->duplex)
+ hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
+ if (hw->enabled && cfg != hw->tx_cfg)
+ writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
+ }
+}
+
+/**
+ * port_get_link_speed - get current link status
+ * @port: The port instance.
+ *
+ * This routine reads PHY registers to determine the current link status of the
+ * switch ports.
+ */
+static void port_get_link_speed(struct ksz_port *port)
+{
+ uint interrupt;
+ struct ksz_port_info *info;
+ struct ksz_port_info *linked = NULL;
+ struct ksz_hw *hw = port->hw;
+ u16 data;
+ u16 status;
+ u8 local;
+ u8 remote;
+ int i;
+ int p;
+ int change = 0;
+
+ interrupt = hw_block_intr(hw);
+
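+ /*
+  * Update a port's link state only when its advertised or partner
+  * capabilities change.
+  */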
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
+ info = &hw->port_info[p];
+ port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
+ port_r16(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
+
+ /*
+ * Link status is changing all the time even when there is no
+ * cable connection!
+ */
+ remote = status & (PORT_AUTO_NEG_COMPLETE |
+ PORT_STATUS_LINK_GOOD);
+ local = (u8) data;
+
+ /* No change to status. */
+ if (local == info->advertised && remote == info->partner)
+ continue;
+
+ info->advertised = local;
+ info->partner = remote;
+ if (status & PORT_STATUS_LINK_GOOD) {
+
+ /* Remember the first linked port. */
+ if (!linked)
+ linked = info;
+
+ info->tx_rate = 10 * TX_RATE_UNIT;
+ if (status & PORT_STATUS_SPEED_100MBIT)
+ info->tx_rate = 100 * TX_RATE_UNIT;
+
+ info->duplex = 1;
+ if (status & PORT_STATUS_FULL_DUPLEX)
+ info->duplex = 2;
+
+ if (media_connected != info->state) {
+ hw_r_phy(hw, p, KS884X_PHY_AUTO_NEG_OFFSET,
+ &data);
+ hw_r_phy(hw, p, KS884X_PHY_REMOTE_CAP_OFFSET,
+ &status);
+ determine_flow_ctrl(hw, port, data, status);
+ if (hw->ksz_switch) {
+ port_cfg_back_pressure(hw, p,
+ (1 == info->duplex));
+ }
+ change |= 1 << i;
+ port_cfg_change(hw, port, info, status);
+ }
+ info->state = media_connected;
+ } else {
+ if (media_disconnected != info->state) {
+ change |= 1 << i;
+
+ /* Indicate the link just goes down. */
+ hw->port_mib[p].link_down = 1;
+ }
+ info->state = media_disconnected;
+ }
+ hw->port_mib[p].state = (u8) info->state;
+ }
+
+ if (linked && media_disconnected == port->linked->state)
+ port->linked = linked;
+
+ hw_restore_intr(hw, interrupt);
+}
+
+#define PHY_RESET_TIMEOUT 10
+
+/**
+ * port_set_link_speed - set port speed
+ * @port: The port instance.
+ *
+ * This routine sets the link speed of the switch ports.
+ */
+static void port_set_link_speed(struct ksz_port *port)
+{
+ struct ksz_port_info *info;
+ struct ksz_hw *hw = port->hw;
+ u16 data;
+ u16 cfg;
+ u8 status;
+ int i;
+ int p;
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
+ info = &hw->port_info[p];
+
+ port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
+ port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
+
+ cfg = 0;
+ if (status & PORT_STATUS_LINK_GOOD)
+ cfg = data;
+
+ data |= PORT_AUTO_NEG_ENABLE;
+ data = advertised_flow_ctrl(port, data);
+
+ data |= PORT_AUTO_NEG_100BTX_FD | PORT_AUTO_NEG_100BTX |
+ PORT_AUTO_NEG_10BT_FD | PORT_AUTO_NEG_10BT;
+
+ /* Check if manual configuration is specified by the user. */
+ if (port->speed || port->duplex) {
+ if (10 == port->speed)
+ data &= ~(PORT_AUTO_NEG_100BTX_FD |
+ PORT_AUTO_NEG_100BTX);
+ else if (100 == port->speed)
+ data &= ~(PORT_AUTO_NEG_10BT_FD |
+ PORT_AUTO_NEG_10BT);
+ if (1 == port->duplex)
+ data &= ~(PORT_AUTO_NEG_100BTX_FD |
+ PORT_AUTO_NEG_10BT_FD);
+ else if (2 == port->duplex)
+ data &= ~(PORT_AUTO_NEG_100BTX |
+ PORT_AUTO_NEG_10BT);
+ }
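+  /* Restart auto-negotiation only if the advertisement changed. */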
+ if (data != cfg) {
+ data |= PORT_AUTO_NEG_RESTART;
+ port_w16(hw, p, KS884X_PORT_CTRL_4_OFFSET, data);
+ }
+ }
+}
+
+/**
+ * port_force_link_speed - force port speed
+ * @port: The port instance.
+ *
+ * This routine forces the link speed of the switch ports.
+ */
+static void port_force_link_speed(struct ksz_port *port)
+{
+ struct ksz_hw *hw = port->hw;
+ u16 data;
+ int i;
+ int phy;
+ int p;
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
+ phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL;
+ hw_r_phy_ctrl(hw, phy, &data);
+
+ data &= ~PHY_AUTO_NEG_ENABLE;
+
+ if (10 == port->speed)
+ data &= ~PHY_SPEED_100MBIT;
+ else if (100 == port->speed)
+ data |= PHY_SPEED_100MBIT;
+ if (1 == port->duplex)
+ data &= ~PHY_FULL_DUPLEX;
+ else if (2 == port->duplex)
+ data |= PHY_FULL_DUPLEX;
+ hw_w_phy_ctrl(hw, phy, data);
+ }
+}
+
+static void port_set_power_saving(struct ksz_port *port, int enable)
+{
+ struct ksz_hw *hw = port->hw;
+ int i;
+ int p;
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++)
+ port_cfg(hw, p,
+ KS884X_PORT_CTRL_4_OFFSET, PORT_POWER_DOWN, enable);
+}
+
+/*
+ * KSZ8841 power management functions
+ */
+
+/**
+ * hw_chk_wol_pme_status - check PMEN pin
+ * @hw: The hardware instance.
+ *
+ * This function is used to check whether the PMEN pin is asserted.
+ *
+ * Return 1 if the PMEN pin is asserted; otherwise, 0.
+ */
+static int hw_chk_wol_pme_status(struct ksz_hw *hw)
+{
+ struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
+ struct pci_dev *pdev = hw_priv->pdev;
+ u16 data;
+
+ if (!pdev->pm_cap)
+ return 0;
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
+ return (data & PCI_PM_CTRL_PME_STATUS) == PCI_PM_CTRL_PME_STATUS;
+}
+
+/**
+ * hw_clr_wol_pme_status - clear PMEN pin
+ * @hw: The hardware instance.
+ *
+ * This routine is used to clear PME_Status to deassert the PMEN pin.
+ */
+static void hw_clr_wol_pme_status(struct ksz_hw *hw)
+{
+ struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
+ struct pci_dev *pdev = hw_priv->pdev;
+ u16 data;
+
+ if (!pdev->pm_cap)
+ return;
+
+ /* Clear PME_Status to deassert PMEN pin. */
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
+ data |= PCI_PM_CTRL_PME_STATUS;
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
+}
+
+/**
+ * hw_cfg_wol_pme - enable or disable Wake-on-LAN
+ * @hw: The hardware instance.
+ * @set: The flag indicating whether to enable or disable.
+ *
+ * This routine is used to enable or disable Wake-on-LAN.
+ */
+static void hw_cfg_wol_pme(struct ksz_hw *hw, int set)
+{
+ struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
+ struct pci_dev *pdev = hw_priv->pdev;
+ u16 data;
+
+ if (!pdev->pm_cap)
+ return;
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
+ data &= ~PCI_PM_CTRL_STATE_MASK;
+ if (set)
+ data |= PCI_PM_CTRL_PME_ENABLE | PCI_D3hot;
+ else
+ data &= ~PCI_PM_CTRL_PME_ENABLE;
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
+}
+
+/**
+ * hw_cfg_wol - configure Wake-on-LAN features
+ * @hw: The hardware instance.
+ * @frame: The pattern frame bit.
+ * @set: The flag indicating whether to enable or disable.
+ *
+ * This routine is used to enable or disable certain Wake-on-LAN features.
+ */
+static void hw_cfg_wol(struct ksz_hw *hw, u16 frame, int set)
+{
+ u16 data;
+
+ data = readw(hw->io + KS8841_WOL_CTRL_OFFSET);
+ if (set)
+ data |= frame;
+ else
+ data &= ~frame;
+ writew(data, hw->io + KS8841_WOL_CTRL_OFFSET);
+}
+
+/**
+ * hw_set_wol_frame - program Wake-on-LAN pattern
+ * @hw: The hardware instance.
+ * @i: The frame index.
+ * @mask_size: The size of the mask.
+ * @mask: Mask to ignore certain bytes in the pattern.
+ * @frame_size: The size of the frame.
+ * @pattern: The frame data.
+ *
+ * This routine is used to program a Wake-on-LAN pattern.
+ */
+static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
+ u8 *mask, uint frame_size, u8 *pattern)
+{
+ int bits;
+ int from;
+ int len;
+ int to;
+ u32 crc;
+ u8 data[64];
+ u8 val = 0;
+
+ if (frame_size > mask_size * 8)
+ frame_size = mask_size * 8;
+ if (frame_size > 64)
+ frame_size = 64;
+
+ i *= 0x10;
+ writel(0, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i);
+ writel(0, hw->io + KS8841_WOL_FRAME_BYTE2_OFFSET + i);
+
+ bits = len = from = to = 0;
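+ /*
+  * Walk the mask one byte at a time; each mask bit selects one pattern
+  * byte to include in the CRC programmed into the hardware.
+  */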
+ do {
+ if (bits) {
+ if ((val & 1))
+ data[to++] = pattern[from];
+ val >>= 1;
+ ++from;
+ --bits;
+ } else {
+ val = mask[len];
+ writeb(val, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i
+ + len);
+ ++len;
+ if (val)
+ bits = 8;
+ else
+ from += 8;
+ }
+ } while (from < (int) frame_size);
+ if (val) {
+ bits = mask[len - 1];
+ val <<= (from % 8);
+ bits &= ~val;
+ writeb(bits, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i + len -
+ 1);
+ }
+ crc = ether_crc(to, data);
+ writel(crc, hw->io + KS8841_WOL_FRAME_CRC_OFFSET + i);
+}
+
+/**
+ * hw_add_wol_arp - add ARP pattern
+ * @hw: The hardware instance.
+ * @ip_addr: The IPv4 address assigned to the device.
+ *
+ * This routine is used to add an ARP pattern for waking up the host.
+ */
+static void hw_add_wol_arp(struct ksz_hw *hw, u8 *ip_addr)
+{
+ u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };
+ u8 pattern[42] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x06,
+ 0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00 };
+
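+ /* Match an ARP request whose target IP (bytes 38-41) is the device's address. */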
+ memcpy(&pattern[38], ip_addr, 4);
+ hw_set_wol_frame(hw, 3, 6, mask, 42, pattern);
+}
+
+/**
+ * hw_add_wol_bcast - add broadcast pattern
+ * @hw: The hardware instance.
+ *
+ * This routine is used to add a broadcast pattern for waking up the host.
+ */
+static void hw_add_wol_bcast(struct ksz_hw *hw)
+{
+ u8 mask[] = { 0x3F };
+ u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
+ hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern);
+}
+
+/**
+ * hw_add_wol_mcast - add multicast pattern
+ * @hw: The hardware instance.
+ *
+ * This routine is used to add a multicast pattern for waking up the host.
+ *
+ * It is assumed the multicast packet is the ICMPv6 neighbor solicitation used
+ * by the IPv6 ping command. Note that multicast packets are filtered through
+ * the multicast hash table, so not all multicast packets can wake up the host.
+ */
+static void hw_add_wol_mcast(struct ksz_hw *hw)
+{
+ u8 mask[] = { 0x3F };
+ u8 pattern[] = { 0x33, 0x33, 0xFF, 0x00, 0x00, 0x00 };
+
+ memcpy(&pattern[3], &hw->override_addr[3], 3);
+ hw_set_wol_frame(hw, 1, 1, mask, 6, pattern);
+}
+
+/**
+ * hw_add_wol_ucast - add unicast pattern
+ * @hw: The hardware instance.
+ *
+ * This routine is used to add a unicast pattern to wake up the host.
+ *
+ * It is assumed the unicast packet is directed to the device, as the hardware
+ * can only receive such packets in the normal case.
+ */
+static void hw_add_wol_ucast(struct ksz_hw *hw)
+{
+ u8 mask[] = { 0x3F };
+
+ hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr);
+}
+
+/**
+ * hw_enable_wol - enable Wake-on-LAN
+ * @hw: The hardware instance.
+ * @wol_enable: The Wake-on-LAN settings.
+ * @net_addr: The IPv4 address assigned to the device.
+ *
+ * This routine is used to enable Wake-on-LAN depending on driver settings.
+ */
+static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, u8 *net_addr)
+{
+ hw_cfg_wol(hw, KS8841_WOL_MAGIC_ENABLE, (wol_enable & WAKE_MAGIC));
+ hw_cfg_wol(hw, KS8841_WOL_FRAME0_ENABLE, (wol_enable & WAKE_UCAST));
+ hw_add_wol_ucast(hw);
+ hw_cfg_wol(hw, KS8841_WOL_FRAME1_ENABLE, (wol_enable & WAKE_MCAST));
+ hw_add_wol_mcast(hw);
+ hw_cfg_wol(hw, KS8841_WOL_FRAME2_ENABLE, (wol_enable & WAKE_BCAST));
+ hw_cfg_wol(hw, KS8841_WOL_FRAME3_ENABLE, (wol_enable & WAKE_ARP));
+ hw_add_wol_arp(hw, net_addr);
+}
+
+/**
+ * hw_init - check driver is correct for the hardware
+ * @hw: The hardware instance.
+ *
+ * This function checks whether the hardware is correct for this driver and
+ * sets the hardware up for proper initialization.
+ *
+ * Return the number of ports, or 0 if the hardware is not supported.
+ */
+static int hw_init(struct ksz_hw *hw)
+{
+ int rc = 0;
+ u16 data;
+ u16 revision;
+
+ /* Set bus speed to 125MHz. */
+ writew(BUS_SPEED_125_MHZ, hw->io + KS884X_BUS_CTRL_OFFSET);
+
+ /* Check KSZ884x chip ID. */
+ data = readw(hw->io + KS884X_CHIP_ID_OFFSET);
+
+ revision = (data & KS884X_REVISION_MASK) >> KS884X_REVISION_SHIFT;
+ data &= KS884X_CHIP_ID_MASK_41;
+ if (REG_CHIP_ID_41 == data)
+ rc = 1;
+ else if (REG_CHIP_ID_42 == data)
+ rc = 2;
+ else
+ return 0;
+
+ /* Setup hardware features or bug workarounds. */
+ if (revision <= 1) {
+ hw->features |= SMALL_PACKET_TX_BUG;
+ if (1 == rc)
+ hw->features |= HALF_DUPLEX_SIGNAL_BUG;
+ }
+ hw->features |= IPV6_CSUM_GEN_HACK;
+ return rc;
+}
+
+/**
+ * hw_reset - reset the hardware
+ * @hw: The hardware instance.
+ *
+ * This routine resets the hardware.
+ */
+static void hw_reset(struct ksz_hw *hw)
+{
+ writew(GLOBAL_SOFTWARE_RESET, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
+
+ /* Wait for device to reset. */
+ mdelay(10);
+
+ /* Write 0 to clear device reset. */
+ writew(0, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
+}
+
+/**
+ * hw_setup - setup the hardware
+ * @hw: The hardware instance.
+ *
+ * This routine sets up the hardware for proper operation.
+ */
+static void hw_setup(struct ksz_hw *hw)
+{
+#if SET_DEFAULT_LED
+ u16 data;
+
+ /* Change default LED mode. */
+ data = readw(hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
+ data &= ~LED_MODE;
+ data |= SET_DEFAULT_LED;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
+#endif
+
+ /* Setup transmit control. */
+ hw->tx_cfg = (DMA_TX_PAD_ENABLE | DMA_TX_CRC_ENABLE |
+ (DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_TX_ENABLE);
+
+ /* Setup receive control. */
+ hw->rx_cfg = (DMA_RX_BROADCAST | DMA_RX_UNICAST |
+ (DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_RX_ENABLE);
+ hw->rx_cfg |= KS884X_DMA_RX_MULTICAST;
+
+ /* Hardware cannot handle UDP packets in IP fragments. */
+ hw->rx_cfg |= (DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);
+
+ if (hw->all_multi)
+ hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
+ if (hw->promiscuous)
+ hw->rx_cfg |= DMA_RX_PROMISCUOUS;
+}
+
+/**
+ * hw_setup_intr - setup interrupt mask
+ * @hw: The hardware instance.
+ *
+ * This routine sets up the interrupt mask for proper operation.
+ */
+static void hw_setup_intr(struct ksz_hw *hw)
+{
+ hw->intr_mask = KS884X_INT_MASK | KS884X_INT_RX_OVERRUN;
+}
+
+static void ksz_check_desc_num(struct ksz_desc_info *info)
+{
+#define MIN_DESC_SHIFT 2
+
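+ /*
+  * The descriptor count must be a power of two and at least
+  * 1 << MIN_DESC_SHIFT; round it up if it is not.
+  */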
+ int alloc = info->alloc;
+ int shift;
+
+ shift = 0;
+ while (!(alloc & 1)) {
+ shift++;
+ alloc >>= 1;
+ }
+ if (alloc != 1 || shift < MIN_DESC_SHIFT) {
+ printk(KERN_ALERT "Hardware descriptor numbers not right!\n");
+ while (alloc) {
+ shift++;
+ alloc >>= 1;
+ }
+ if (shift < MIN_DESC_SHIFT)
+ shift = MIN_DESC_SHIFT;
+ alloc = 1 << shift;
+ info->alloc = alloc;
+ }
+ info->mask = info->alloc - 1;
+}
+
+static void hw_init_desc(struct ksz_desc_info *desc_info, int transmit)
+{
+ int i;
+ u32 phys = desc_info->ring_phys;
+ struct ksz_hw_desc *desc = desc_info->ring_virt;
+ struct ksz_desc *cur = desc_info->ring;
+ struct ksz_desc *previous = NULL;
+
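+ /* Chain the hardware descriptors into a circular ring. */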
+ for (i = 0; i < desc_info->alloc; i++) {
+ cur->phw = desc++;
+ phys += desc_info->size;
+ previous = cur++;
+ previous->phw->next = cpu_to_le32(phys);
+ }
+ previous->phw->next = cpu_to_le32(desc_info->ring_phys);
+ previous->sw.buf.rx.end_of_ring = 1;
+ previous->phw->buf.data = cpu_to_le32(previous->sw.buf.data);
+
+ desc_info->avail = desc_info->alloc;
+ desc_info->last = desc_info->next = 0;
+
+ desc_info->cur = desc_info->ring;
+}
+
+/**
+ * hw_set_desc_base - set descriptor base addresses
+ * @hw: The hardware instance.
+ * @tx_addr: The transmit descriptor base.
+ * @rx_addr: The receive descriptor base.
+ *
+ * This routine programs the descriptor base addresses after reset.
+ */
+static void hw_set_desc_base(struct ksz_hw *hw, u32 tx_addr, u32 rx_addr)
+{
+ /* Set base address of Tx/Rx descriptors. */
+ writel(tx_addr, hw->io + KS_DMA_TX_ADDR);
+ writel(rx_addr, hw->io + KS_DMA_RX_ADDR);
+}
+
+static void hw_reset_pkts(struct ksz_desc_info *info)
+{
+ info->cur = info->ring;
+ info->avail = info->alloc;
+ info->last = info->next = 0;
+}
+
+static inline void hw_resume_rx(struct ksz_hw *hw)
+{
+ writel(DMA_START, hw->io + KS_DMA_RX_START);
+}
+
+/**
+ * hw_start_rx - start receiving
+ * @hw: The hardware instance.
+ *
+ * This routine starts the receive function of the hardware.
+ */
+static void hw_start_rx(struct ksz_hw *hw)
+{
+ writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
+
+ /* Notify when the receive stops. */
+ hw->intr_mask |= KS884X_INT_RX_STOPPED;
+
+ writel(DMA_START, hw->io + KS_DMA_RX_START);
+ hw_ack_intr(hw, KS884X_INT_RX_STOPPED);
+ hw->rx_stop++;
+
+ /* Variable overflows. */
+ if (0 == hw->rx_stop)
+ hw->rx_stop = 2;
+}
+
+/**
+ * hw_stop_rx - stop receiving
+ * @hw: The hardware instance.
+ *
+ * This routine stops the receive function of the hardware.
+ */
+static void hw_stop_rx(struct ksz_hw *hw)
+{
+ hw->rx_stop = 0;
+ hw_turn_off_intr(hw, KS884X_INT_RX_STOPPED);
+ writel((hw->rx_cfg & ~DMA_RX_ENABLE), hw->io + KS_DMA_RX_CTRL);
+}
+
+/**
+ * hw_start_tx - start transmitting
+ * @hw: The hardware instance.
+ *
+ * This routine starts the transmit function of the hardware.
+ */
+static void hw_start_tx(struct ksz_hw *hw)
+{
+ writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
+}
+
+/**
+ * hw_stop_tx - stop transmitting
+ * @hw: The hardware instance.
+ *
+ * This routine stops the transmit function of the hardware.
+ */
+static void hw_stop_tx(struct ksz_hw *hw)
+{
+ writel((hw->tx_cfg & ~DMA_TX_ENABLE), hw->io + KS_DMA_TX_CTRL);
+}
+
+/**
+ * hw_disable - disable hardware
+ * @hw: The hardware instance.
+ *
+ * This routine disables the hardware.
+ */
+static void hw_disable(struct ksz_hw *hw)
+{
+ hw_stop_rx(hw);
+ hw_stop_tx(hw);
+ hw->enabled = 0;
+}
+
+/**
+ * hw_enable - enable hardware
+ * @hw: The hardware instance.
+ *
+ * This routine enables the hardware.
+ */
+static void hw_enable(struct ksz_hw *hw)
+{
+ hw_start_tx(hw);
+ hw_start_rx(hw);
+ hw->enabled = 1;
+}
+
+/**
+ * hw_alloc_pkt - allocate enough descriptors for transmission
+ * @hw: The hardware instance.
+ * @length: The length of the packet.
+ * @physical: Number of descriptors required.
+ *
+ * This function allocates descriptors for transmission.
+ *
+ * Return 0 if not successful; 1 for buffer copy; or number of descriptors.
+ */
+static int hw_alloc_pkt(struct ksz_hw *hw, int length, int physical)
+{
+ /* Always leave one descriptor free. */
+ if (hw->tx_desc_info.avail <= 1)
+ return 0;
+
+ /* Allocate a descriptor for transmission and mark it current. */
+ get_tx_pkt(&hw->tx_desc_info, &hw->tx_desc_info.cur);
+ hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1;
+
+ /* Keep track of number of transmit descriptors used so far. */
+ ++hw->tx_int_cnt;
+ hw->tx_size += length;
+
+ /* Cannot hold on too much data. */
+ if (hw->tx_size >= MAX_TX_HELD_SIZE)
+ hw->tx_int_cnt = hw->tx_int_mask + 1;
+
+ if (physical > hw->tx_desc_info.avail)
+ return 1;
+
+ return hw->tx_desc_info.avail;
+}
+
+/**
+ * hw_send_pkt - mark packet for transmission
+ * @hw: The hardware instance.
+ *
+ * This routine marks the packet for transmission in the PCI version.
+ */
+static void hw_send_pkt(struct ksz_hw *hw)
+{
+ struct ksz_desc *cur = hw->tx_desc_info.cur;
+
+ cur->sw.buf.tx.last_seg = 1;
+
+ /* Interrupt only after specified number of descriptors used. */
+ if (hw->tx_int_cnt > hw->tx_int_mask) {
+ cur->sw.buf.tx.intr = 1;
+ hw->tx_int_cnt = 0;
+ hw->tx_size = 0;
+ }
+
+ /* KSZ8842 supports port directed transmission. */
+ cur->sw.buf.tx.dest_port = hw->dst_ports;
+
+ release_desc(cur);
+
+ writel(0, hw->io + KS_DMA_TX_START);
+}
+
+static int empty_addr(u8 *addr)
+{
+ u32 *addr1 = (u32 *) addr;
+ u16 *addr2 = (u16 *) &addr[4];
+
+ return 0 == *addr1 && 0 == *addr2;
+}
+
+/**
+ * hw_set_addr - set MAC address
+ * @hw: The hardware instance.
+ *
+ * This routine programs the MAC address of the hardware when the address is
+ * overridden.
+ */
+static void hw_set_addr(struct ksz_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < MAC_ADDR_LEN; i++)
+ writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
+ hw->io + KS884X_ADDR_0_OFFSET + i);
+
+ sw_set_addr(hw, hw->override_addr);
+}
+
+/**
+ * hw_read_addr - read MAC address
+ * @hw: The hardware instance.
+ *
+ * This routine retrieves the MAC address of the hardware.
+ */
+static void hw_read_addr(struct ksz_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < MAC_ADDR_LEN; i++)
+ hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
+ KS884X_ADDR_0_OFFSET + i);
+
+ if (!hw->mac_override) {
+ memcpy(hw->override_addr, hw->perm_addr, MAC_ADDR_LEN);
+ if (empty_addr(hw->override_addr)) {
+ memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS,
+ MAC_ADDR_LEN);
+ memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
+ MAC_ADDR_LEN);
+ hw->override_addr[5] += hw->id;
+ hw_set_addr(hw);
+ }
+ }
+}
+
+static void hw_ena_add_addr(struct ksz_hw *hw, int index, u8 *mac_addr)
+{
+ int i;
+ u32 mac_addr_lo;
+ u32 mac_addr_hi;
+
+ mac_addr_hi = 0;
+ for (i = 0; i < 2; i++) {
+ mac_addr_hi <<= 8;
+ mac_addr_hi |= mac_addr[i];
+ }
+ mac_addr_hi |= ADD_ADDR_ENABLE;
+ mac_addr_lo = 0;
+ for (i = 2; i < 6; i++) {
+ mac_addr_lo <<= 8;
+ mac_addr_lo |= mac_addr[i];
+ }
+ index *= ADD_ADDR_INCR;
+
+ writel(mac_addr_lo, hw->io + index + KS_ADD_ADDR_0_LO);
+ writel(mac_addr_hi, hw->io + index + KS_ADD_ADDR_0_HI);
+}
+
+static void hw_set_add_addr(struct ksz_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < ADDITIONAL_ENTRIES; i++) {
+ if (empty_addr(hw->address[i]))
+ writel(0, hw->io + ADD_ADDR_INCR * i +
+ KS_ADD_ADDR_0_HI);
+ else
+ hw_ena_add_addr(hw, i, hw->address[i]);
+ }
+}
+
+static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
+{
+ int i;
+ int j = ADDITIONAL_ENTRIES;
+
+ if (!memcmp(hw->override_addr, mac_addr, MAC_ADDR_LEN))
+ return 0;
+ for (i = 0; i < hw->addr_list_size; i++) {
+ if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN))
+ return 0;
+ if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
+ j = i;
+ }
+ if (j < ADDITIONAL_ENTRIES) {
+ memcpy(hw->address[j], mac_addr, MAC_ADDR_LEN);
+ hw_ena_add_addr(hw, j, hw->address[j]);
+ return 0;
+ }
+ return -1;
+}
+
+static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
+{
+ int i;
+
+ for (i = 0; i < hw->addr_list_size; i++) {
+ if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN)) {
+ memset(hw->address[i], 0, MAC_ADDR_LEN);
+ writel(0, hw->io + ADD_ADDR_INCR * i +
+ KS_ADD_ADDR_0_HI);
+ return 0;
+ }
+ }
+ return -1;
+}
+
+/**
+ * hw_clr_multicast - clear multicast addresses
+ * @hw: The hardware instance.
+ *
+ * This routine removes all multicast addresses set in the hardware.
+ */
+static void hw_clr_multicast(struct ksz_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < HW_MULTICAST_SIZE; i++) {
+ hw->multi_bits[i] = 0;
+
+ writeb(0, hw->io + KS884X_MULTICAST_0_OFFSET + i);
+ }
+}
+
+/**
+ * hw_set_grp_addr - set multicast addresses
+ * @hw: The hardware instance.
+ *
+ * This routine programs multicast addresses for the hardware to accept those
+ * addresses.
+ */
+static void hw_set_grp_addr(struct ksz_hw *hw)
+{
+ int i;
+ int index;
+ int position;
+ int value;
+
+ memset(hw->multi_bits, 0, sizeof(u8) * HW_MULTICAST_SIZE);
+
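+ /*
+ * Hash each address into one of 64 filter bits: the top six bits of
+ * the Ethernet CRC pick the bit, eight bits per table byte.
+ */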
+ for (i = 0; i < hw->multi_list_size; i++) {
+ position = (ether_crc(6, hw->multi_list[i]) >> 26) & 0x3f;
+ index = position >> 3;
+ value = 1 << (position & 7);
+ hw->multi_bits[index] |= (u8) value;
+ }
+
+ for (i = 0; i < HW_MULTICAST_SIZE; i++)
+ writeb(hw->multi_bits[i], hw->io + KS884X_MULTICAST_0_OFFSET +
+ i);
+}
+
+/**
+ * hw_set_multicast - enable or disable all multicast receiving
+ * @hw: The hardware instance.
+ * @multicast: Flag to turn the all-multicast feature on or off.
+ *
+ * This routine enables/disables the hardware to accept all multicast packets.
+ */
+static void hw_set_multicast(struct ksz_hw *hw, u8 multicast)
+{
+ /* Stop receiving for reconfiguration. */
+ hw_stop_rx(hw);
+
+ if (multicast)
+ hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
+ else
+ hw->rx_cfg &= ~DMA_RX_ALL_MULTICAST;
+
+ if (hw->enabled)
+ hw_start_rx(hw);
+}
+
+/**
+ * hw_set_promiscuous - enable or disable promiscuous receiving
+ * @hw: The hardware instance.
+ * @prom: Flag to turn promiscuous mode on or off.
+ *
+ * This routine enables/disables the hardware to accept all packets.
+ */
+static void hw_set_promiscuous(struct ksz_hw *hw, u8 prom)
+{
+ /* Stop receiving for reconfiguration. */
+ hw_stop_rx(hw);
+
+ if (prom)
+ hw->rx_cfg |= DMA_RX_PROMISCUOUS;
+ else
+ hw->rx_cfg &= ~DMA_RX_PROMISCUOUS;
+
+ if (hw->enabled)
+ hw_start_rx(hw);
+}
+
+/**
+ * sw_enable - enable the switch
+ * @hw: The hardware instance.
+ * @enable: The flag to enable or disable the switch.
+ *
+ * This routine is used to enable/disable the switch in KSZ8842.
+ */
+static void sw_enable(struct ksz_hw *hw, int enable)
+{
+ int port;
+
+ for (port = 0; port < SWITCH_PORT_NUM; port++) {
+ if (hw->dev_count > 1) {
+ /* Set port-base vlan membership with host port. */
+ sw_cfg_port_base_vlan(hw, port,
+ HOST_MASK | (1 << port));
+ port_set_stp_state(hw, port, STP_STATE_DISABLED);
+ } else {
+ sw_cfg_port_base_vlan(hw, port, PORT_MASK);
+ port_set_stp_state(hw, port, STP_STATE_FORWARDING);
+ }
+ }
+ if (hw->dev_count > 1)
+ port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
+ else
+ port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_FORWARDING);
+
+ if (enable)
+ enable = KS8842_START;
+ writew(enable, hw->io + KS884X_CHIP_ID_OFFSET);
+}
+
+/**
+ * sw_setup - setup the switch
+ * @hw: The hardware instance.
+ *
+ * This routine sets up the hardware switch engine for default operation.
+ */
+static void sw_setup(struct ksz_hw *hw)
+{
+ int port;
+
+ sw_set_global_ctrl(hw);
+
+ /* Enable switch broadcast storm protection at a 10% rate. */
+ sw_init_broad_storm(hw);
+ hw_cfg_broad_storm(hw, BROADCAST_STORM_PROTECTION_RATE);
+ for (port = 0; port < SWITCH_PORT_NUM; port++)
+ sw_ena_broad_storm(hw, port);
+
+ sw_init_prio(hw);
+
+ sw_init_mirror(hw);
+
+ sw_init_prio_rate(hw);
+
+ sw_init_vlan(hw);
+
+ if (hw->features & STP_SUPPORT)
+ sw_init_stp(hw);
+ if (!sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_TX_FLOW_CTRL | SWITCH_RX_FLOW_CTRL))
+ hw->overrides |= PAUSE_FLOW_CTRL;
+ sw_enable(hw, 1);
+}
+
+/**
+ * ksz_start_timer - start kernel timer
+ * @info: Kernel timer information.
+ * @time: The time tick.
+ *
+ * This routine starts the kernel timer after the specified time tick.
+ */
+static void ksz_start_timer(struct ksz_timer_info *info, int time)
+{
+ info->cnt = 0;
+ info->timer.expires = jiffies + time;
+ add_timer(&info->timer);
+
+ /* infinity */
+ info->max = -1;
+}
+
+/**
+ * ksz_stop_timer - stop kernel timer
+ * @info: Kernel timer information.
+ *
+ * This routine stops the kernel timer.
+ */
+static void ksz_stop_timer(struct ksz_timer_info *info)
+{
+ if (info->max) {
+ info->max = 0;
+ del_timer_sync(&info->timer);
+ }
+}
+
+static void ksz_init_timer(struct ksz_timer_info *info, int period,
+ void (*function)(unsigned long), void *data)
+{
+ info->max = 0;
+ info->period = period;
+ init_timer(&info->timer);
+ info->timer.function = function;
+ info->timer.data = (unsigned long) data;
+}
+
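+/*
+ * Re-arm the timer: a negative max means run periodically forever, while a
+ * positive max limits the number of restarts.
+ */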
+static void ksz_update_timer(struct ksz_timer_info *info)
+{
+ ++info->cnt;
+ if (info->max > 0) {
+ if (info->cnt < info->max) {
+ info->timer.expires = jiffies + info->period;
+ add_timer(&info->timer);
+ } else
+ info->max = 0;
+ } else if (info->max < 0) {
+ info->timer.expires = jiffies + info->period;
+ add_timer(&info->timer);
+ }
+}
+
+/**
+ * ksz_alloc_soft_desc - allocate software descriptors
+ * @desc_info: Descriptor information structure.
+ * @transmit: Indication that descriptors are for transmit.
+ *
+ * This local function allocates software descriptors for manipulation in
+ * memory.
+ *
+ * Return 0 if successful.
+ */
+static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
+{
+ desc_info->ring = kmalloc(sizeof(struct ksz_desc) * desc_info->alloc,
+ GFP_KERNEL);
+ if (!desc_info->ring)
+ return 1;
+ memset((void *) desc_info->ring, 0,
+ sizeof(struct ksz_desc) * desc_info->alloc);
+ hw_init_desc(desc_info, transmit);
+ return 0;
+}
+
+/**
+ * ksz_alloc_desc - allocate hardware descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This local function allocates hardware descriptors for receiving and
+ * transmitting.
+ *
+ * Return 0 if successful.
+ */
+static int ksz_alloc_desc(struct dev_info *adapter)
+{
+ struct ksz_hw *hw = &adapter->hw;
+ int offset;
+
+ /* Allocate memory for RX & TX descriptors. */
+ adapter->desc_pool.alloc_size =
+ hw->rx_desc_info.size * hw->rx_desc_info.alloc +
+ hw->tx_desc_info.size * hw->tx_desc_info.alloc +
+ DESC_ALIGNMENT;
+
+ adapter->desc_pool.alloc_virt =
+ pci_alloc_consistent(
+ adapter->pdev, adapter->desc_pool.alloc_size,
+ &adapter->desc_pool.dma_addr);
+ if (adapter->desc_pool.alloc_virt == NULL) {
+ adapter->desc_pool.alloc_size = 0;
+ return 1;
+ }
+ memset(adapter->desc_pool.alloc_virt, 0, adapter->desc_pool.alloc_size);
+
+ /* Align to the next cache line boundary. */
+ offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ?
+ (DESC_ALIGNMENT -
+ ((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT)) : 0);
+ adapter->desc_pool.virt = adapter->desc_pool.alloc_virt + offset;
+ adapter->desc_pool.phys = adapter->desc_pool.dma_addr + offset;
+
+ /* Allocate receive/transmit descriptors. */
+ hw->rx_desc_info.ring_virt = (struct ksz_hw_desc *)
+ adapter->desc_pool.virt;
+ hw->rx_desc_info.ring_phys = adapter->desc_pool.phys;
+ offset = hw->rx_desc_info.alloc * hw->rx_desc_info.size;
+ hw->tx_desc_info.ring_virt = (struct ksz_hw_desc *)
+ (adapter->desc_pool.virt + offset);
+ hw->tx_desc_info.ring_phys = adapter->desc_pool.phys + offset;
+
+ if (ksz_alloc_soft_desc(&hw->rx_desc_info, 0))
+ return 1;
+ if (ksz_alloc_soft_desc(&hw->tx_desc_info, 1))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * free_dma_buf - release DMA buffer resources
+ * @adapter: Adapter information structure.
+ * @dma_buf: DMA buffer to release.
+ * @direction: DMA direction the buffer was mapped with.
+ *
+ * This routine is a helper to unmap and free a single DMA buffer.
+ */
+static void free_dma_buf(struct dev_info *adapter, struct ksz_dma_buf *dma_buf,
+ int direction)
+{
+ pci_unmap_single(adapter->pdev, dma_buf->dma, dma_buf->len, direction);
+ dev_kfree_skb(dma_buf->skb);
+ dma_buf->skb = NULL;
+ dma_buf->dma = 0;
+}
+
+/**
+ * ksz_init_rx_buffers - initialize receive descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This routine initializes DMA buffers for receiving.
+ */
+static void ksz_init_rx_buffers(struct dev_info *adapter)
+{
+ int i;
+ struct ksz_desc *desc;
+ struct ksz_dma_buf *dma_buf;
+ struct ksz_hw *hw = &adapter->hw;
+ struct ksz_desc_info *info = &hw->rx_desc_info;
+
+ for (i = 0; i < hw->rx_desc_info.alloc; i++) {
+ get_rx_pkt(info, &desc);
+
+ dma_buf = DMA_BUFFER(desc);
+ if (dma_buf->skb && dma_buf->len != adapter->mtu)
+ free_dma_buf(adapter, dma_buf, PCI_DMA_FROMDEVICE);
+ dma_buf->len = adapter->mtu;
+ if (!dma_buf->skb)
+ dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
+ if (dma_buf->skb && !dma_buf->dma) {
+ dma_buf->skb->dev = adapter->dev;
+ dma_buf->dma = pci_map_single(
+ adapter->pdev,
+ skb_tail_pointer(dma_buf->skb),
+ dma_buf->len,
+ PCI_DMA_FROMDEVICE);
+ }
+
+ /* Set descriptor. */
+ set_rx_buf(desc, dma_buf->dma);
+ set_rx_len(desc, dma_buf->len);
+ release_desc(desc);
+ }
+}
+
+/**
+ * ksz_alloc_mem - allocate memory for hardware descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This function allocates memory for use by hardware descriptors for receiving
+ * and transmitting.
+ *
+ * Return 0 if successful.
+ */
+static int ksz_alloc_mem(struct dev_info *adapter)
+{
+ struct ksz_hw *hw = &adapter->hw;
+
+ /* Determine the number of receive and transmit descriptors. */
+ hw->rx_desc_info.alloc = NUM_OF_RX_DESC;
+ hw->tx_desc_info.alloc = NUM_OF_TX_DESC;
+
+ /* Determine how many descriptors to skip transmit interrupt. */
+ hw->tx_int_cnt = 0;
+ hw->tx_int_mask = NUM_OF_TX_DESC / 4;
+ if (hw->tx_int_mask > 8)
+ hw->tx_int_mask = 8;
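+ /*
+ * Reduce the count to its highest power of two and turn it into a
+ * bit mask, so a transmit interrupt is requested only after that
+ * many descriptors have been used.
+ */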
+ while (hw->tx_int_mask) {
+ hw->tx_int_cnt++;
+ hw->tx_int_mask >>= 1;
+ }
+ if (hw->tx_int_cnt) {
+ hw->tx_int_mask = (1 << (hw->tx_int_cnt - 1)) - 1;
+ hw->tx_int_cnt = 0;
+ }
+
+ /* Determine the descriptor size. */
+ hw->rx_desc_info.size =
+ (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
+ DESC_ALIGNMENT) * DESC_ALIGNMENT);
+ hw->tx_desc_info.size =
+ (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
+ DESC_ALIGNMENT) * DESC_ALIGNMENT);
+ if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
+ printk(KERN_ALERT
+ "Hardware descriptor size not right!\n");
+ ksz_check_desc_num(&hw->rx_desc_info);
+ ksz_check_desc_num(&hw->tx_desc_info);
+
+ /* Allocate descriptors. */
+ if (ksz_alloc_desc(adapter))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * ksz_free_desc - free software and hardware descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This local routine frees the software and hardware descriptors allocated by
+ * ksz_alloc_desc().
+ */
+static void ksz_free_desc(struct dev_info *adapter)
+{
+ struct ksz_hw *hw = &adapter->hw;
+
+ /* Reset descriptor. */
+ hw->rx_desc_info.ring_virt = NULL;
+ hw->tx_desc_info.ring_virt = NULL;
+ hw->rx_desc_info.ring_phys = 0;
+ hw->tx_desc_info.ring_phys = 0;
+
+ /* Free memory. */
+ if (adapter->desc_pool.alloc_virt)
+ pci_free_consistent(
+ adapter->pdev,
+ adapter->desc_pool.alloc_size,
+ adapter->desc_pool.alloc_virt,
+ adapter->desc_pool.dma_addr);
+
+ /* Reset resource pool. */
+ adapter->desc_pool.alloc_size = 0;
+ adapter->desc_pool.alloc_virt = NULL;
+
+ kfree(hw->rx_desc_info.ring);
+ hw->rx_desc_info.ring = NULL;
+ kfree(hw->tx_desc_info.ring);
+ hw->tx_desc_info.ring = NULL;
+}
+
+/**
+ * ksz_free_buffers - free buffers used in the descriptors
+ * @adapter: Adapter information structure.
+ * @desc_info: Descriptor information structure.
+ * @direction: DMA direction the buffers were mapped with.
+ *
+ * This local routine frees the DMA buffers attached to the descriptors.
+ */
+static void ksz_free_buffers(struct dev_info *adapter,
+ struct ksz_desc_info *desc_info, int direction)
+{
+ int i;
+ struct ksz_dma_buf *dma_buf;
+ struct ksz_desc *desc = desc_info->ring;
+
+ for (i = 0; i < desc_info->alloc; i++) {
+ dma_buf = DMA_BUFFER(desc);
+ if (dma_buf->skb)
+ free_dma_buf(adapter, dma_buf, direction);
+ desc++;
+ }
+}
+
+/**
+ * ksz_free_mem - free all resources used by descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This local routine frees all the resources allocated by ksz_alloc_mem().
+ */
+static void ksz_free_mem(struct dev_info *adapter)
+{
+ /* Free transmit buffers. */
+ ksz_free_buffers(adapter, &adapter->hw.tx_desc_info,
+ PCI_DMA_TODEVICE);
+
+ /* Free receive buffers. */
+ ksz_free_buffers(adapter, &adapter->hw.rx_desc_info,
+ PCI_DMA_FROMDEVICE);
+
+ /* Free descriptors. */
+ ksz_free_desc(adapter);
+}
+
+static void get_mib_counters(struct ksz_hw *hw, int first, int cnt,
+ u64 *counter)
+{
+ int i;
+ int mib;
+ int port;
+ struct ksz_port_mib *port_mib;
+
+ memset(counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
+ for (i = 0, port = first; i < cnt; i++, port++) {
+ port_mib = &hw->port_mib[port];
+ for (mib = port_mib->mib_start; mib < hw->mib_cnt; mib++)
+ counter[mib] += port_mib->counter[mib];
+ }
+}
+
+/**
+ * send_packet - send packet
+ * @skb: Socket buffer.
+ * @dev: Network device.
+ *
+ * This routine is used to send a packet out to the network.
+ */
+static void send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ksz_desc *desc;
+ struct ksz_desc *first;
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_desc_info *info = &hw->tx_desc_info;
+ struct ksz_dma_buf *dma_buf;
+ int len;
+ int last_frag = skb_shinfo(skb)->nr_frags;
+
+ /*
+ * KSZ8842 with multiple device interfaces needs to be told which port
+ * to send.
+ */
+ if (hw->dev_count > 1)
+ hw->dst_ports = 1 << priv->port.first_port;
+
+ /* Hardware will pad the length to 60. */
+ len = skb->len;
+
+ /* Remember the very first descriptor. */
+ first = info->cur;
+ desc = first;
+
+ dma_buf = DMA_BUFFER(desc);
+ if (last_frag) {
+ int frag;
+ skb_frag_t *this_frag;
+
+ dma_buf->len = skb->len - skb->data_len;
+
+ dma_buf->dma = pci_map_single(
+ hw_priv->pdev, skb->data, dma_buf->len,
+ PCI_DMA_TODEVICE);
+ set_tx_buf(desc, dma_buf->dma);
+ set_tx_len(desc, dma_buf->len);
+
+ frag = 0;
+ do {
+ this_frag = &skb_shinfo(skb)->frags[frag];
+
+ /* Get a new descriptor. */
+ get_tx_pkt(info, &desc);
+
+ /* Keep track of descriptors used so far. */
+ ++hw->tx_int_cnt;
+
+ dma_buf = DMA_BUFFER(desc);
+ dma_buf->len = this_frag->size;
+
+ dma_buf->dma = pci_map_single(
+ hw_priv->pdev,
+ page_address(this_frag->page) +
+ this_frag->page_offset,
+ dma_buf->len,
+ PCI_DMA_TODEVICE);
+ set_tx_buf(desc, dma_buf->dma);
+ set_tx_len(desc, dma_buf->len);
+
+ frag++;
+ if (frag == last_frag)
+ break;
+
+ /* Do not release the last descriptor here. */
+ release_desc(desc);
+ } while (1);
+
+ /* current points to the last descriptor. */
+ info->cur = desc;
+
+ /* Release the first descriptor. */
+ release_desc(first);
+ } else {
+ dma_buf->len = len;
+
+ dma_buf->dma = pci_map_single(
+ hw_priv->pdev, skb->data, dma_buf->len,
+ PCI_DMA_TODEVICE);
+ set_tx_buf(desc, dma_buf->dma);
+ set_tx_len(desc, dma_buf->len);
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ (desc)->sw.buf.tx.csum_gen_tcp = 1;
+ (desc)->sw.buf.tx.csum_gen_udp = 1;
+ }
+
+ /*
+ * The last descriptor holds the packet so that it can be returned to
+ * network subsystem after all descriptors are transmitted.
+ */
+ dma_buf->skb = skb;
+
+ hw_send_pkt(hw);
+
+ /* Update transmit statistics. */
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += len;
+}
+
+/**
+ * transmit_cleanup - clean up transmit descriptors
+ * @hw_priv: Network device information.
+ * @normal: Stop at the first descriptor still owned by the hardware;
+ * otherwise force it back to software ownership.
+ *
+ * This routine is called to clean up the transmitted buffers.
+ */
+static void transmit_cleanup(struct dev_info *hw_priv, int normal)
+{
+ int last;
+ union desc_stat status;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_desc_info *info = &hw->tx_desc_info;
+ struct ksz_desc *desc;
+ struct ksz_dma_buf *dma_buf;
+ struct net_device *dev = NULL;
+
+ spin_lock(&hw_priv->hwlock);
+ last = info->last;
+
+ while (info->avail < info->alloc) {
+ /* Get next descriptor which is not hardware owned. */
+ desc = &info->ring[last];
+ status.data = le32_to_cpu(desc->phw->ctrl.data);
+ if (status.tx.hw_owned) {
+ if (normal)
+ break;
+ else
+ reset_desc(desc, status);
+ }
+
+ dma_buf = DMA_BUFFER(desc);
+ pci_unmap_single(
+ hw_priv->pdev, dma_buf->dma, dma_buf->len,
+ PCI_DMA_TODEVICE);
+
+ /* This descriptor contains the last buffer in the packet. */
+ if (dma_buf->skb) {
+ dev = dma_buf->skb->dev;
+
+ /* Release the packet back to network subsystem. */
+ dev_kfree_skb_irq(dma_buf->skb);
+ dma_buf->skb = NULL;
+ }
+
+ /* Free the transmitted descriptor. */
+ last++;
+ last &= info->mask;
+ info->avail++;
+ }
+ info->last = last;
+ spin_unlock(&hw_priv->hwlock);
+
+ /* Notify the network subsystem that the packet has been sent. */
+ if (dev)
+ dev->trans_start = jiffies;
+}
+
+/**
+ * tx_done - transmit done processing
+ * @hw_priv: Network device information.
+ *
+ * This routine is called when the transmit interrupt is triggered, indicating
+ * either a packet is sent successfully or there are transmit errors.
+ */
+static void tx_done(struct dev_info *hw_priv)
+{
+ struct ksz_hw *hw = &hw_priv->hw;
+ int port;
+
+ transmit_cleanup(hw_priv, 1);
+
+ for (port = 0; port < hw->dev_count; port++) {
+ struct net_device *dev = hw->port_info[port].pdev;
+
+ if (netif_running(dev) && netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ }
+}
+
+static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
+{
+ skb->dev = old->dev;
+ skb->protocol = old->protocol;
+ skb->ip_summed = old->ip_summed;
+ skb->csum = old->csum;
+ skb_set_network_header(skb, ETH_HLEN);
+
+ dev_kfree_skb(old);
+}
+
+/**
+ * netdev_tx - send out packet
+ * @skb: Socket buffer.
+ * @dev: Network device.
+ *
+ * This function is used by the upper network layer to send out a packet.
+ *
+ * Return 0 if successful; otherwise an error code indicating failure.
+ */
+static int netdev_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int left;
+ int num = 1;
+ int rc = 0;
+
+ if (hw->features & SMALL_PACKET_TX_BUG) {
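+ /*
+ * Work around trouble with very short frames: pad anything of 48
+ * bytes or less out to 50 bytes, copying into a fresh skb when
+ * there is no tailroom for the padding.
+ */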
+ struct sk_buff *org_skb = skb;
+
+ if (skb->len <= 48) {
+ if (skb_end_pointer(skb) - skb->data >= 50) {
+ memset(&skb->data[skb->len], 0, 50 - skb->len);
+ skb->len = 50;
+ } else {
+ skb = dev_alloc_skb(50);
+ if (!skb)
+ return NETDEV_TX_BUSY;
+ memcpy(skb->data, org_skb->data, org_skb->len);
+ memset(&skb->data[org_skb->len], 0,
+ 50 - org_skb->len);
+ skb->len = 50;
+ copy_old_skb(org_skb, skb);
+ }
+ }
+ }
+
+ spin_lock_irq(&hw_priv->hwlock);
+
+ num = skb_shinfo(skb)->nr_frags + 1;
+ left = hw_alloc_pkt(hw, skb->len, num);
+ if (left) {
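+ /*
+ * Linearize and checksum in software when there are not enough
+ * descriptors for all fragments or when the IPv6 checksum
+ * generation workaround applies.
+ */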
+ if (left < num ||
+ ((hw->features & IPV6_CSUM_GEN_HACK) &&
+ (CHECKSUM_PARTIAL == skb->ip_summed) &&
+ (ETH_P_IPV6 == htons(skb->protocol)))) {
+ struct sk_buff *org_skb = skb;
+
+ skb = dev_alloc_skb(org_skb->len);
+ if (!skb)
+ return NETDEV_TX_BUSY;
+ skb_copy_and_csum_dev(org_skb, skb->data);
+ org_skb->ip_summed = 0;
+ skb->len = org_skb->len;
+ copy_old_skb(org_skb, skb);
+ }
+ send_packet(skb, dev);
+ if (left <= num)
+ netif_stop_queue(dev);
+ } else {
+ /* Stop the transmit queue until packet is allocated. */
+ netif_stop_queue(dev);
+ rc = NETDEV_TX_BUSY;
+ }
+
+ spin_unlock_irq(&hw_priv->hwlock);
+
+ return rc;
+}
+
+/**
+ * netdev_tx_timeout - transmit timeout processing
+ * @dev: Network device.
+ *
+ * This routine is called when the transmit timer expires, which indicates
+ * that the hardware has stopped generating transmit interrupts and so is no
+ * longer freeing up resources for the transmit path. The hardware is reset
+ * to correct the problem.
+ */
+static void netdev_tx_timeout(struct net_device *dev)
+{
+ static unsigned long last_reset;
+
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int port;
+
+ if (hw->dev_count > 1) {
+ /*
+ * Only reset the hardware if time between calls is long
+ * enough.
+ */
+ if (jiffies - last_reset <= dev->watchdog_timeo)
+ hw_priv = NULL;
+ }
+
+ last_reset = jiffies;
+ if (hw_priv) {
+ hw_dis_intr(hw);
+ hw_disable(hw);
+
+ transmit_cleanup(hw_priv, 0);
+ hw_reset_pkts(&hw->rx_desc_info);
+ hw_reset_pkts(&hw->tx_desc_info);
+ ksz_init_rx_buffers(hw_priv);
+
+ hw_reset(hw);
+
+ hw_set_desc_base(hw,
+ hw->tx_desc_info.ring_phys,
+ hw->rx_desc_info.ring_phys);
+ hw_set_addr(hw);
+ if (hw->all_multi)
+ hw_set_multicast(hw, hw->all_multi);
+ else if (hw->multi_list_size)
+ hw_set_grp_addr(hw);
+
+ if (hw->dev_count > 1) {
+ hw_set_add_addr(hw);
+ for (port = 0; port < SWITCH_PORT_NUM; port++) {
+ struct net_device *port_dev;
+
+ port_set_stp_state(hw, port,
+ STP_STATE_DISABLED);
+
+ port_dev = hw->port_info[port].pdev;
+ if (netif_running(port_dev))
+ port_set_stp_state(hw, port,
+ STP_STATE_SIMPLE);
+ }
+ }
+
+ hw_enable(hw);
+ hw_ena_intr(hw);
+ }
+
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+static inline void csum_verified(struct sk_buff *skb)
+{
+ unsigned short protocol;
+ struct iphdr *iph;
+
+ protocol = skb->protocol;
+ skb_reset_network_header(skb);
+ iph = (struct iphdr *) skb_network_header(skb);
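+ /*
+ * For VLAN frames the network header still points at the VLAN tag
+ * here, so iph->tot_len overlays the encapsulated protocol field;
+ * reread the IP header past the tag before checking for TCP/IPv4.
+ */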
+ if (protocol == htons(ETH_P_8021Q)) {
+ protocol = iph->tot_len;
+ skb_set_network_header(skb, VLAN_HLEN);
+ iph = (struct iphdr *) skb_network_header(skb);
+ }
+ if (protocol == htons(ETH_P_IP)) {
+ if (iph->protocol == IPPROTO_TCP)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+}
+
+static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
+ struct ksz_desc *desc, union desc_stat status)
+{
+ int packet_len;
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_dma_buf *dma_buf;
+ struct sk_buff *skb;
+ int rx_status;
+
+ /* Received length includes 4-byte CRC. */
+ packet_len = status.rx.frame_len - 4;
+
+ dma_buf = DMA_BUFFER(desc);
+ pci_dma_sync_single_for_cpu(
+ hw_priv->pdev, dma_buf->dma, packet_len + 4,
+ PCI_DMA_FROMDEVICE);
+
+ do {
+ /* skb->data != skb->head */
+ skb = dev_alloc_skb(packet_len + 2);
+ if (!skb) {
+ priv->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ /*
+ * Align socket buffer in 4-byte boundary for better
+ * performance.
+ */
+ skb_reserve(skb, 2);
+
+ memcpy(skb_put(skb, packet_len),
+ dma_buf->skb->data, packet_len);
+ } while (0);
+
+ skb->dev = dev;
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
+ csum_verified(skb);
+
+ /* Update receive statistics. */
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += packet_len;
+
+ /* Notify upper layer for received packet. */
+ dev->last_rx = jiffies;
+
+ rx_status = netif_rx(skb);
+
+ return 0;
+}
+
+static int dev_rcv_packets(struct dev_info *hw_priv)
+{
+ int next;
+ union desc_stat status;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct net_device *dev = hw->port_info[0].pdev;
+ struct ksz_desc_info *info = &hw->rx_desc_info;
+ int left = info->alloc;
+ struct ksz_desc *desc;
+ int received = 0;
+
+ next = info->next;
+ while (left--) {
+ /* Get next descriptor which is not hardware owned. */
+ desc = &info->ring[next];
+ status.data = le32_to_cpu(desc->phw->ctrl.data);
+ if (status.rx.hw_owned)
+ break;
+
+ /* Status valid only when last descriptor bit is set. */
+ if (status.rx.last_desc && status.rx.first_desc) {
+ if (rx_proc(dev, hw, desc, status))
+ goto release_packet;
+ received++;
+ }
+
+release_packet:
+ release_desc(desc);
+ next++;
+ next &= info->mask;
+ }
+ info->next = next;
+
+ return received;
+}
+
+static int port_rcv_packets(struct dev_info *hw_priv)
+{
+ int next;
+ union desc_stat status;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct net_device *dev = hw->port_info[0].pdev;
+ struct ksz_desc_info *info = &hw->rx_desc_info;
+ int left = info->alloc;
+ struct ksz_desc *desc;
+ int received = 0;
+
+ next = info->next;
+ while (left--) {
+ /* Get next descriptor which is not hardware owned. */
+ desc = &info->ring[next];
+ status.data = le32_to_cpu(desc->phw->ctrl.data);
+ if (status.rx.hw_owned)
+ break;
+
+ if (hw->dev_count > 1) {
+ /* Get received port number. */
+ int p = HW_TO_DEV_PORT(status.rx.src_port);
+
+ dev = hw->port_info[p].pdev;
+ if (!netif_running(dev))
+ goto release_packet;
+ }
+
+ /* Status valid only when last descriptor bit is set. */
+ if (status.rx.last_desc && status.rx.first_desc) {
+ if (rx_proc(dev, hw, desc, status))
+ goto release_packet;
+ received++;
+ }
+
+release_packet:
+ release_desc(desc);
+ next++;
+ next &= info->mask;
+ }
+ info->next = next;
+
+ return received;
+}
+
+static int dev_rcv_special(struct dev_info *hw_priv)
+{
+ int next;
+ union desc_stat status;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct net_device *dev = hw->port_info[0].pdev;
+ struct ksz_desc_info *info = &hw->rx_desc_info;
+ int left = info->alloc;
+ struct ksz_desc *desc;
+ int received = 0;
+
+ next = info->next;
+ while (left--) {
+ /* Get next descriptor which is not hardware owned. */
+ desc = &info->ring[next];
+ status.data = le32_to_cpu(desc->phw->ctrl.data);
+ if (status.rx.hw_owned)
+ break;
+
+ if (hw->dev_count > 1) {
+ /* Get received port number. */
+ int p = HW_TO_DEV_PORT(status.rx.src_port);
+
+ dev = hw->port_info[p].pdev;
+ if (!netif_running(dev))
+ goto release_packet;
+ }
+
+ /* Status valid only when last descriptor bit is set. */
+ if (status.rx.last_desc && status.rx.first_desc) {
+ /*
+ * Receive without error. With receive errors
+ * disabled, packets with receive errors will be
+ * dropped, so no need to check the error bit.
+ */
+ if (!status.rx.error || (status.data &
+ KS_DESC_RX_ERROR_COND) ==
+ KS_DESC_RX_ERROR_TOO_LONG) {
+ if (rx_proc(dev, hw, desc, status))
+ goto release_packet;
+ received++;
+ } else {
+ struct dev_priv *priv = netdev_priv(dev);
+
+ /* Update receive error statistics. */
+ priv->port.counter[OID_COUNTER_RCV_ERROR]++;
+ }
+ }
+
+release_packet:
+ release_desc(desc);
+ next++;
+ next &= info->mask;
+ }
+ info->next = next;
+
+ return received;
+}
+
+static void rx_proc_task(unsigned long data)
+{
+ struct dev_info *hw_priv = (struct dev_info *) data;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ if (!hw->enabled)
+ return;
+ if (unlikely(!hw_priv->dev_rcv(hw_priv))) {
+
+ /* In case receive process is suspended because of overrun. */
+ hw_resume_rx(hw);
+
+ /* tasklets are interruptible. */
+ spin_lock_irq(&hw_priv->hwlock);
+ hw_turn_on_intr(hw, KS884X_INT_RX_MASK);
+ spin_unlock_irq(&hw_priv->hwlock);
+ } else {
+ hw_ack_intr(hw, KS884X_INT_RX);
+ tasklet_schedule(&hw_priv->rx_tasklet);
+ }
+}
+
+static void tx_proc_task(unsigned long data)
+{
+ struct dev_info *hw_priv = (struct dev_info *) data;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ hw_ack_intr(hw, KS884X_INT_TX_MASK);
+
+ tx_done(hw_priv);
+
+ /* tasklets are interruptible. */
+ spin_lock_irq(&hw_priv->hwlock);
+ hw_turn_on_intr(hw, KS884X_INT_TX);
+ spin_unlock_irq(&hw_priv->hwlock);
+}
+
+static inline void handle_rx_stop(struct ksz_hw *hw)
+{
+ /* Receive has just been stopped. */
+ if (0 == hw->rx_stop)
+ hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
+ else if (hw->rx_stop > 1) {
+ if (hw->enabled && (hw->rx_cfg & DMA_RX_ENABLE)) {
+ hw_start_rx(hw);
+ } else {
+ hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
+ hw->rx_stop = 0;
+ }
+ } else
+ /* Receive has just been started. */
+ hw->rx_stop++;
+}
+
+/**
+ * netdev_intr - interrupt handling
+ * @irq: Interrupt number.
+ * @dev_id: Network device.
+ *
+ * This function is called when the hardware signals an interrupt.
+ *
+ * Return IRQ_HANDLED if interrupt is handled.
+ */
+static irqreturn_t netdev_intr(int irq, void *dev_id)
+{
+ uint int_enable = 0;
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ hw_read_intr(hw, &int_enable);
+
+ /* Not our interrupt! */
+ if (!int_enable)
+ return IRQ_NONE;
+
+ do {
+ hw_ack_intr(hw, int_enable);
+ int_enable &= hw->intr_mask;
+
+ if (unlikely(int_enable & KS884X_INT_TX_MASK)) {
+ hw_dis_intr_bit(hw, KS884X_INT_TX_MASK);
+ tasklet_schedule(&hw_priv->tx_tasklet);
+ }
+
+ if (likely(int_enable & KS884X_INT_RX)) {
+ hw_dis_intr_bit(hw, KS884X_INT_RX);
+ tasklet_schedule(&hw_priv->rx_tasklet);
+ }
+
+ if (unlikely(int_enable & KS884X_INT_RX_OVERRUN)) {
+ priv->stats.rx_fifo_errors++;
+ hw_resume_rx(hw);
+ }
+
+ if (unlikely(int_enable & KS884X_INT_PHY)) {
+ struct ksz_port *port = &priv->port;
+
+ hw->features |= LINK_INT_WORKING;
+ port_get_link_speed(port);
+ }
+
+ if (unlikely(int_enable & KS884X_INT_RX_STOPPED)) {
+ handle_rx_stop(hw);
+ break;
+ }
+
+ if (unlikely(int_enable & KS884X_INT_TX_STOPPED)) {
+ u32 data;
+
+ hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
+ printk(KERN_INFO "Tx stopped\n");
+ data = readl(hw->io + KS_DMA_TX_CTRL);
+ if (!(data & DMA_TX_ENABLE))
+ printk(KERN_INFO "Tx disabled\n");
+ break;
+ }
+ } while (0);
+
+ hw_ena_intr(hw);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Linux network device functions
+ */
+
+static unsigned long next_jiffies;
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void netdev_netpoll(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ hw_dis_intr(&hw_priv->hw);
+ netdev_intr(dev->irq, dev);
+}
+#endif
+
+static void bridge_change(struct ksz_hw *hw)
+{
+ int port;
+ u8 member;
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ /* No ports in forwarding state. */
+ if (!sw->member) {
+ port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
+ sw_block_addr(hw);
+ }
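+ /*
+ * Recompute each port's port-based VLAN membership from its STP
+ * state: forwarding ports join the bridge group, others keep only
+ * the host port.
+ */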
+ for (port = 0; port < SWITCH_PORT_NUM; port++) {
+ if (STP_STATE_FORWARDING == sw->port_cfg[port].stp_state)
+ member = HOST_MASK | sw->member;
+ else
+ member = HOST_MASK | (1 << port);
+ if (member != sw->port_cfg[port].member)
+ sw_cfg_port_base_vlan(hw, port, member);
+ }
+}
+
+/**
+ * netdev_close - close network device
+ * @dev: Network device.
+ *
+ * This function processes the close operation of the network device, caused
+ * for example by the user command "ifconfig ethX down."
+ *
+ * Return 0 if successful; otherwise an error code indicating failure.
+ */
+static int netdev_close(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_port *port = &priv->port;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int pi;
+
+ netif_stop_queue(dev);
+
+ ksz_stop_timer(&priv->monitor_timer_info);
+
+ /* Need to shut the port manually in multiple device interfaces mode. */
+ if (hw->dev_count > 1) {
+ port_set_stp_state(hw, port->first_port, STP_STATE_DISABLED);
+
+ /* Port is closed. Need to change bridge setting. */
+ if (hw->features & STP_SUPPORT) {
+ pi = 1 << port->first_port;
+ if (hw->ksz_switch->member & pi) {
+ hw->ksz_switch->member &= ~pi;
+ bridge_change(hw);
+ }
+ }
+ }
+ if (port->first_port > 0)
+ hw_del_addr(hw, dev->dev_addr);
+ if (!hw_priv->wol_enable)
+ port_set_power_saving(port, true);
+
+ if (priv->multicast)
+ --hw->all_multi;
+ if (priv->promiscuous)
+ --hw->promiscuous;
+
+ hw_priv->opened--;
+ if (!(hw_priv->opened)) {
+ ksz_stop_timer(&hw_priv->mib_timer_info);
+ flush_work(&hw_priv->mib_read);
+
+ hw_dis_intr(hw);
+ hw_disable(hw);
+ hw_clr_multicast(hw);
+
+ /* Delay for receive task to stop scheduling itself. */
+ msleep(2000 / HZ);
+
+ tasklet_disable(&hw_priv->rx_tasklet);
+ tasklet_disable(&hw_priv->tx_tasklet);
+ free_irq(dev->irq, hw_priv->dev);
+
+ transmit_cleanup(hw_priv, 0);
+ hw_reset_pkts(&hw->rx_desc_info);
+ hw_reset_pkts(&hw->tx_desc_info);
+
+ /* Clean out static MAC table when the switch is shutdown. */
+ if (hw->features & STP_SUPPORT)
+ sw_clr_sta_mac_table(hw);
+ }
+
+ return 0;
+}
+
+static void hw_cfg_huge_frame(struct dev_info *hw_priv, struct ksz_hw *hw)
+{
+ if (hw->ksz_switch) {
+ u32 data;
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
+ if (hw->features & RX_HUGE_FRAME)
+ data |= SWITCH_HUGE_PACKET;
+ else
+ data &= ~SWITCH_HUGE_PACKET;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
+ }
+ if (hw->features & RX_HUGE_FRAME) {
+ hw->rx_cfg |= DMA_RX_ERROR;
+ hw_priv->dev_rcv = dev_rcv_special;
+ } else {
+ hw->rx_cfg &= ~DMA_RX_ERROR;
+ if (hw->dev_count > 1)
+ hw_priv->dev_rcv = port_rcv_packets;
+ else
+ hw_priv->dev_rcv = dev_rcv_packets;
+ }
+}
+
+static int prepare_hardware(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int rc = 0;
+
+ /* Remember the network device that requests interrupts. */
+ hw_priv->dev = dev;
+ rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
+ if (rc)
+ return rc;
+ tasklet_enable(&hw_priv->rx_tasklet);
+ tasklet_enable(&hw_priv->tx_tasklet);
+
+ hw->promiscuous = 0;
+ hw->all_multi = 0;
+ hw->multi_list_size = 0;
+
+ hw_reset(hw);
+
+ hw_set_desc_base(hw,
+ hw->tx_desc_info.ring_phys, hw->rx_desc_info.ring_phys);
+ hw_set_addr(hw);
+ hw_cfg_huge_frame(hw_priv, hw);
+ ksz_init_rx_buffers(hw_priv);
+ return 0;
+}
+
+/**
+ * netdev_open - open network device
+ * @dev: Network device.
+ *
+ * This function processes the open operation of the network device, caused
+ * for example by the user command "ifconfig ethX up."
+ *
+ * Return 0 if successful; otherwise an error code indicating failure.
+ */
+static int netdev_open(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+ int i;
+ int p;
+ int rc = 0;
+
+ priv->multicast = 0;
+ priv->promiscuous = 0;
+
+ /* Reset device statistics. */
+ memset(&priv->stats, 0, sizeof(struct net_device_stats));
+ memset((void *) port->counter, 0,
+ (sizeof(u64) * OID_COUNTER_LAST));
+
+ if (!(hw_priv->opened)) {
+ rc = prepare_hardware(dev);
+ if (rc)
+ return rc;
+ for (i = 0; i < hw->mib_port_cnt; i++) {
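+ /*
+ * Stagger the initial MIB read times so the ports are not all
+ * polled in the same tick.
+ */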
+ if (next_jiffies < jiffies)
+ next_jiffies = jiffies + HZ * 2;
+ else
+ next_jiffies += HZ * 1;
+ hw_priv->counter[i].time = next_jiffies;
+ hw->port_mib[i].state = media_disconnected;
+ port_init_cnt(hw, i);
+ }
+ if (hw->ksz_switch)
+ hw->port_mib[HOST_PORT].state = media_connected;
+ else {
+ hw_add_wol_bcast(hw);
+ hw_cfg_wol_pme(hw, 0);
+ hw_clr_wol_pme_status(&hw_priv->hw);
+ }
+ }
+ port_set_power_saving(port, false);
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
+ /*
+ * Initialize to invalid value so that link detection
+ * is done.
+ */
+ hw->port_info[p].partner = 0xFF;
+ hw->port_info[p].state = media_disconnected;
+ }
+
+ /* Need to open the port in multiple device interfaces mode. */
+ if (hw->dev_count > 1) {
+ port_set_stp_state(hw, port->first_port, STP_STATE_SIMPLE);
+ if (port->first_port > 0)
+ hw_add_addr(hw, dev->dev_addr);
+ }
+
+ port_get_link_speed(port);
+ if (port->force_link)
+ port_force_link_speed(port);
+ else
+ port_set_link_speed(port);
+
+ if (!(hw_priv->opened)) {
+ hw_setup_intr(hw);
+ hw_enable(hw);
+ hw_ena_intr(hw);
+
+ if (hw->mib_port_cnt)
+ ksz_start_timer(&hw_priv->mib_timer_info,
+ hw_priv->mib_timer_info.period);
+ }
+
+ hw_priv->opened++;
+
+ ksz_start_timer(&priv->monitor_timer_info,
+ priv->monitor_timer_info.period);
+
+ priv->media_state = port->linked->state;
+
+ if (media_connected == priv->media_state)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ if (netif_msg_link(priv))
+ printk(KERN_INFO "%s link %s\n", dev->name,
+ (media_connected == priv->media_state ?
+ "on" : "off"));
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/* RX errors = rx_errors */
+/* RX dropped = rx_dropped */
+/* RX overruns = rx_fifo_errors */
+/* RX frame = rx_crc_errors + rx_frame_errors + rx_length_errors */
+/* TX errors = tx_errors */
+/* TX dropped = tx_dropped */
+/* TX overruns = tx_fifo_errors */
+/* TX carrier = tx_aborted_errors + tx_carrier_errors + tx_window_errors */
+/* collisions = collisions */
+
+/**
+ * netdev_query_statistics - query network device statistics
+ * @dev: Network device.
+ *
+ * This function returns the statistics of the network device. The device
+ * need not be opened.
+ *
+ * Return network device statistics.
+ */
+static struct net_device_stats *netdev_query_statistics(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct ksz_port *port = &priv->port;
+ struct ksz_hw *hw = &priv->adapter->hw;
+ struct ksz_port_mib *mib;
+ int i;
+ int p;
+
+ priv->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR];
+ priv->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR];
+
+ /* Reset to zero to add count later. */
+ priv->stats.multicast = 0;
+ priv->stats.collisions = 0;
+ priv->stats.rx_length_errors = 0;
+ priv->stats.rx_crc_errors = 0;
+ priv->stats.rx_frame_errors = 0;
+ priv->stats.tx_window_errors = 0;
+
+ for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
+ mib = &hw->port_mib[p];
+
+ priv->stats.multicast += (unsigned long)
+ mib->counter[MIB_COUNTER_RX_MULTICAST];
+
+ priv->stats.collisions += (unsigned long)
+ mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION];
+
+ priv->stats.rx_length_errors += (unsigned long)(
+ mib->counter[MIB_COUNTER_RX_UNDERSIZE] +
+ mib->counter[MIB_COUNTER_RX_FRAGMENT] +
+ mib->counter[MIB_COUNTER_RX_OVERSIZE] +
+ mib->counter[MIB_COUNTER_RX_JABBER]);
+ priv->stats.rx_crc_errors += (unsigned long)
+ mib->counter[MIB_COUNTER_RX_CRC_ERR];
+ priv->stats.rx_frame_errors += (unsigned long)(
+ mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] +
+ mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]);
+
+ priv->stats.tx_window_errors += (unsigned long)
+ mib->counter[MIB_COUNTER_TX_LATE_COLLISION];
+ }
+
+ return &priv->stats;
+}
+
+/**
+ * netdev_set_mac_address - set network device MAC address
+ * @dev: Network device.
+ * @addr: Buffer of MAC address.
+ *
+ * This function is used to set the MAC address of the network device.
+ *
+ * Return 0 to indicate success.
+ */
+static int netdev_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct sockaddr *mac = addr;
+ uint interrupt;
+
+ if (priv->port.first_port > 0)
+ hw_del_addr(hw, dev->dev_addr);
+ else {
+ hw->mac_override = 1;
+ memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN);
+ }
+
+ memcpy(dev->dev_addr, mac->sa_data, MAC_ADDR_LEN);
+
+ interrupt = hw_block_intr(hw);
+
+ if (priv->port.first_port > 0)
+ hw_add_addr(hw, dev->dev_addr);
+ else
+ hw_set_addr(hw);
+ hw_restore_intr(hw, interrupt);
+
+ return 0;
+}
+
+static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
+ struct ksz_hw *hw, int promiscuous)
+{
+ if (promiscuous != priv->promiscuous) {
+ u8 prev_state = hw->promiscuous;
+
+ if (promiscuous)
+ ++hw->promiscuous;
+ else
+ --hw->promiscuous;
+ priv->promiscuous = promiscuous;
+
+ /* Turn on/off promiscuous mode. */
+ if (hw->promiscuous <= 1 && prev_state <= 1)
+ hw_set_promiscuous(hw, hw->promiscuous);
+
+ /*
+ * Port is not in promiscuous mode, meaning it is released
+ * from the bridge.
+ */
+ if ((hw->features & STP_SUPPORT) && !promiscuous &&
+ dev->br_port) {
+ struct ksz_switch *sw = hw->ksz_switch;
+ int port = priv->port.first_port;
+
+ port_set_stp_state(hw, port, STP_STATE_DISABLED);
+ port = 1 << port;
+ if (sw->member & port) {
+ sw->member &= ~port;
+ bridge_change(hw);
+ }
+ }
+ }
+}
+
+static void dev_set_multicast(struct dev_priv *priv, struct ksz_hw *hw,
+ int multicast)
+{
+ if (multicast != priv->multicast) {
+ u8 all_multi = hw->all_multi;
+
+ if (multicast)
+ ++hw->all_multi;
+ else
+ --hw->all_multi;
+ priv->multicast = multicast;
+
+ /* Turn on/off all multicast mode. */
+ if (hw->all_multi <= 1 && all_multi <= 1)
+ hw_set_multicast(hw, hw->all_multi);
+ }
+}
+
+/**
+ * netdev_set_rx_mode - set receive mode
+ * @dev: Network device.
+ *
+ * This routine is used to set multicast addresses or put the network device
+ * into promiscuous mode.
+ */
+static void netdev_set_rx_mode(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct dev_mc_list *mc_ptr;
+ int multicast = (dev->flags & IFF_ALLMULTI);
+
+ dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));
+
+ if (hw_priv->hw.dev_count > 1)
+ multicast |= (dev->flags & IFF_MULTICAST);
+ dev_set_multicast(priv, hw, multicast);
+
+ /* Cannot use different hashes in multiple device interfaces mode. */
+ if (hw_priv->hw.dev_count > 1)
+ return;
+
+ if ((dev->flags & IFF_MULTICAST) && dev->mc_count) {
+ int i = 0;
+
+ /* List too big to support so turn on all multicast mode. */
+ if (dev->mc_count > MAX_MULTICAST_LIST) {
+ if (MAX_MULTICAST_LIST != hw->multi_list_size) {
+ hw->multi_list_size = MAX_MULTICAST_LIST;
+ ++hw->all_multi;
+ hw_set_multicast(hw, hw->all_multi);
+ }
+ return;
+ }
+
+ for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+ if (!(*mc_ptr->dmi_addr & 1))
+ continue;
+ if (i >= MAX_MULTICAST_LIST)
+ break;
+ memcpy(hw->multi_list[i++], mc_ptr->dmi_addr,
+ MAC_ADDR_LEN);
+ }
+ hw->multi_list_size = (u8) i;
+ hw_set_grp_addr(hw);
+ } else {
+ if (MAX_MULTICAST_LIST == hw->multi_list_size) {
+ --hw->all_multi;
+ hw_set_multicast(hw, hw->all_multi);
+ }
+ hw->multi_list_size = 0;
+ hw_clr_multicast(hw);
+ }
+}
+
+static int netdev_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int hw_mtu;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* Cannot use different MTU in multiple device interfaces mode. */
+ if (hw->dev_count > 1)
+ if (dev != hw_priv->dev)
+ return 0;
+ if (new_mtu < 60)
+ return -EINVAL;
+
+ if (dev->mtu != new_mtu) {
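+ /*
+ * Add the Ethernet header and 4-byte FCS, pick the regular or
+ * huge receive buffer size accordingly, and round up to a
+ * 4-byte multiple.
+ */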
+ hw_mtu = new_mtu + ETHERNET_HEADER_SIZE + 4;
+ if (hw_mtu > MAX_RX_BUF_SIZE)
+ return -EINVAL;
+ if (hw_mtu > REGULAR_RX_BUF_SIZE) {
+ hw->features |= RX_HUGE_FRAME;
+ hw_mtu = MAX_RX_BUF_SIZE;
+ } else {
+ hw->features &= ~RX_HUGE_FRAME;
+ hw_mtu = REGULAR_RX_BUF_SIZE;
+ }
+ hw_mtu = (hw_mtu + 3) & ~3;
+ hw_priv->mtu = hw_mtu;
+ dev->mtu = new_mtu;
+ }
+ return 0;
+}
+
+/**
+ * netdev_ioctl - I/O control processing
+ * @dev: Network device.
+ * @ifr: Interface request structure.
+ * @cmd: I/O control code.
+ *
+ * This function is used to process I/O control calls.
+ *
+ * Return 0 to indicate success.
+ */
+static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+ int rc;
+ int result = 0;
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ if (down_interruptible(&priv->proc_sem))
+ return -ERESTARTSYS;
+
+ /* assume success */
+ rc = 0;
+ switch (cmd) {
+ /* Get address of MII PHY in use. */
+ case SIOCGMIIPHY:
+ data->phy_id = priv->id;
+
+ /* Fallthrough... */
+
+ /* Read MII PHY register. */
+ case SIOCGMIIREG:
+ if (data->phy_id != priv->id || data->reg_num >= 6)
+ result = -EIO;
+ else
+ hw_r_phy(hw, port->linked->port_id, data->reg_num,
+ &data->val_out);
+ break;
+
+ /* Write MII PHY register. */
+ case SIOCSMIIREG:
+ if (!capable(CAP_NET_ADMIN))
+ result = -EPERM;
+ else if (data->phy_id != priv->id || data->reg_num >= 6)
+ result = -EIO;
+ else
+ hw_w_phy(hw, port->linked->port_id, data->reg_num,
+ data->val_in);
+ break;
+
+ default:
+ result = -EOPNOTSUPP;
+ }
+
+ up(&priv->proc_sem);
+
+ return result;
+}
+
+/*
+ * MII support
+ */
+
+/**
+ * mdio_read - read PHY register
+ * @dev: Network device.
+ * @phy_id: The PHY id.
+ * @reg_num: The register number.
+ *
+ * This function returns the PHY register value.
+ *
+ * Return the register value.
+ */
+static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct ksz_port *port = &priv->port;
+ struct ksz_hw *hw = port->hw;
+ u16 val_out;
+
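+ /*
+ * hw_r_phy/hw_w_phy appear to take a byte offset rather than an MII
+ * register number, hence reg_num << 1 (an assumption from the call
+ * sites, not from the register map).
+ */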
+ hw_r_phy(hw, port->linked->port_id, reg_num << 1, &val_out);
+ return val_out;
+}
+
+/**
+ * mdio_write - set PHY register
+ * @dev: Network device.
+ * @phy_id: The PHY id.
+ * @reg_num: The register number.
+ * @val: The register value.
+ *
+ * This procedure sets the PHY register value.
+ */
+static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct ksz_port *port = &priv->port;
+ struct ksz_hw *hw = port->hw;
+ int i;
+ int pi;
+
+ for (i = 0, pi = port->first_port; i < port->port_cnt; i++, pi++)
+ hw_w_phy(hw, pi, reg_num << 1, val);
+}
+
+/*
+ * ethtool support
+ */
+
+#define EEPROM_SIZE 0x40
+
+static u16 eeprom_data[EEPROM_SIZE] = { 0 };
+
+#define ADVERTISED_ALL \
+ (ADVERTISED_10baseT_Half | \
+ ADVERTISED_10baseT_Full | \
+ ADVERTISED_100baseT_Half | \
+ ADVERTISED_100baseT_Full)
+
+/* These functions use the MII functions in mii.c. */
+
+/**
+ * netdev_get_settings - get network device settings
+ * @dev: Network device.
+ * @cmd: Ethtool command.
+ *
+ * This function queries the PHY and returns its state in the ethtool command.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ mutex_lock(&hw_priv->lock);
+ mii_ethtool_gset(&priv->mii_if, cmd);
+ cmd->advertising |= SUPPORTED_TP;
+ mutex_unlock(&hw_priv->lock);
+
+ /* Save advertised settings for workaround in next function. */
+ priv->advertising = cmd->advertising;
+ return 0;
+}
+
+/**
+ * netdev_set_settings - set network device settings
+ * @dev: Network device.
+ * @cmd: Ethtool command.
+ *
+ * This function sets the PHY according to the ethtool command.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_port *port = &priv->port;
+ int rc;
+
+ /*
+ * ethtool utility does not change advertised setting if auto
+ * negotiation is not specified explicitly.
+ */
+ if (cmd->autoneg && priv->advertising == cmd->advertising) {
+ cmd->advertising |= ADVERTISED_ALL;
+ if (10 == cmd->speed)
+ cmd->advertising &=
+ ~(ADVERTISED_100baseT_Full |
+ ADVERTISED_100baseT_Half);
+ else if (100 == cmd->speed)
+ cmd->advertising &=
+ ~(ADVERTISED_10baseT_Full |
+ ADVERTISED_10baseT_Half);
+ if (0 == cmd->duplex)
+ cmd->advertising &=
+ ~(ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Full);
+ else if (1 == cmd->duplex)
+ cmd->advertising &=
+ ~(ADVERTISED_100baseT_Half |
+ ADVERTISED_10baseT_Half);
+ }
+ mutex_lock(&hw_priv->lock);
+ if (cmd->autoneg &&
+ (cmd->advertising & ADVERTISED_ALL) ==
+ ADVERTISED_ALL) {
+ port->duplex = 0;
+ port->speed = 0;
+ port->force_link = 0;
+ } else {
+ port->duplex = cmd->duplex + 1;
+ if (cmd->speed != 1000)
+ port->speed = cmd->speed;
+ if (cmd->autoneg)
+ port->force_link = 0;
+ else
+ port->force_link = 1;
+ }
+ rc = mii_ethtool_sset(&priv->mii_if, cmd);
+ mutex_unlock(&hw_priv->lock);
+ return rc;
+}
+
+/**
+ * netdev_nway_reset - restart auto-negotiation
+ * @dev: Network device.
+ *
+ * This function restarts the PHY for auto-negotiation.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_nway_reset(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ int rc;
+
+ mutex_lock(&hw_priv->lock);
+ rc = mii_nway_restart(&priv->mii_if);
+ mutex_unlock(&hw_priv->lock);
+ return rc;
+}
+
+/**
+ * netdev_get_link - get network device link status
+ * @dev: Network device.
+ *
+ * This function gets the link status from the PHY.
+ *
+ * Return true if PHY is linked and false otherwise.
+ */
+static u32 netdev_get_link(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ int rc;
+
+ rc = mii_link_ok(&priv->mii_if);
+ return rc;
+}
+
+/**
+ * netdev_get_drvinfo - get network driver information
+ * @dev: Network device.
+ * @info: Ethtool driver info data structure.
+ *
+ * This procedure returns the driver information.
+ */
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(hw_priv->pdev));
+}
+
+/**
+ * netdev_get_regs_len - get length of register dump
+ * @dev: Network device.
+ *
+ * This function returns the length of the register dump.
+ *
+ * Return length of the register dump.
+ */
+static struct hw_regs {
+ int start;
+ int end;
+} hw_regs_range[] = {
+ { KS_DMA_TX_CTRL, KS884X_INTERRUPTS_STATUS },
+ { KS_ADD_ADDR_0_LO, KS_ADD_ADDR_F_HI },
+ { KS884X_ADDR_0_OFFSET, KS8841_WOL_FRAME_BYTE2_OFFSET },
+ { KS884X_SIDER_P, KS8842_SGCR7_P },
+ { KS8842_MACAR1_P, KS8842_TOSR8_P },
+ { KS884X_P1MBCR_P, KS8842_P3ERCR_P },
+ { 0, 0 }
+};
+
+static int netdev_get_regs_len(struct net_device *dev)
+{
+ struct hw_regs *range = hw_regs_range;
+ int regs_len = 0x10 * sizeof(u32);
+
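+ /*
+ * The dump starts with the first 0x40 bytes of PCI configuration
+ * space, followed by each register range rounded up to a 4-byte
+ * multiple.
+ */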
+ while (range->end > range->start) {
+ regs_len += (range->end - range->start + 3) / 4 * 4;
+ range++;
+ }
+ return regs_len;
+}
+
+/**
+ * netdev_get_regs - get register dump
+ * @dev: Network device.
+ * @regs: Ethtool registers data structure.
+ * @ptr: Buffer to store the register values.
+ *
+ * This procedure dumps the register values in the provided buffer.
+ */
+static void netdev_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *ptr)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int *buf = (int *) ptr;
+ struct hw_regs *range = hw_regs_range;
+ int len;
+
+ mutex_lock(&hw_priv->lock);
+ regs->version = 0;
+ for (len = 0; len < 0x40; len += 4) {
+ pci_read_config_dword(hw_priv->pdev, len, buf);
+ buf++;
+ }
+ while (range->end > range->start) {
+ for (len = range->start; len < range->end; len += 4) {
+ *buf = readl(hw->io + len);
+ buf++;
+ }
+ range++;
+ }
+ mutex_unlock(&hw_priv->lock);
+}
+
+#define WOL_SUPPORT \
+ (WAKE_PHY | WAKE_MAGIC | \
+ WAKE_UCAST | WAKE_MCAST | \
+ WAKE_BCAST | WAKE_ARP)
+
+/**
+ * netdev_get_wol - get Wake-on-LAN support
+ * @dev: Network device.
+ * @wol: Ethtool Wake-on-LAN data structure.
+ *
+ * This procedure returns Wake-on-LAN support.
+ */
+static void netdev_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ wol->supported = hw_priv->wol_support;
+ wol->wolopts = hw_priv->wol_enable;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+/**
+ * netdev_set_wol - set Wake-on-LAN support
+ * @dev: Network device.
+ * @wol: Ethtool Wake-on-LAN data structure.
+ *
+ * This function sets Wake-on-LAN support.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ /* Need to find a way to retrieve the device IP address. */
+ u8 net_addr[] = { 192, 168, 1, 1 };
+
+ if (wol->wolopts & ~hw_priv->wol_support)
+ return -EINVAL;
+
+ hw_priv->wol_enable = wol->wolopts;
+
+ /* Link wakeup cannot really be disabled. */
+ if (wol->wolopts)
+ hw_priv->wol_enable |= WAKE_PHY;
+ hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr);
+ return 0;
+}
+
+/**
+ * netdev_get_msglevel - get debug message level
+ * @dev: Network device.
+ *
+ * This function returns current debug message level.
+ *
+ * Return current debug message flags.
+ */
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+/**
+ * netdev_set_msglevel - set debug message level
+ * @dev: Network device.
+ * @value: Debug message flags.
+ *
+ * This procedure sets debug message level.
+ */
+static void netdev_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = value;
+}
+
+/**
+ * netdev_get_eeprom_len - get EEPROM length
+ * @dev: Network device.
+ *
+ * This function returns the length of the EEPROM.
+ *
+ * Return length of the EEPROM.
+ */
+static int netdev_get_eeprom_len(struct net_device *dev)
+{
+ return EEPROM_SIZE * 2;
+}
+
+/**
+ * netdev_get_eeprom - get EEPROM data
+ * @dev: Network device.
+ * @eeprom: Ethtool EEPROM data structure.
+ * @data: Buffer to store the EEPROM data.
+ *
+ * This function dumps the EEPROM data in the provided buffer.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+#define EEPROM_MAGIC 0x10A18842
+
+static int netdev_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ u8 *eeprom_byte = (u8 *) eeprom_data;
+ int i;
+ int len;
+
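+ /* The EEPROM is read 16 bits at a time; convert the requested byte
+ * range into an inclusive range of word indices.
+ */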
+ len = (eeprom->offset + eeprom->len + 1) / 2;
+ for (i = eeprom->offset / 2; i < len; i++)
+ eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
+ eeprom->magic = EEPROM_MAGIC;
+ memcpy(data, &eeprom_byte[eeprom->offset], eeprom->len);
+
+ return 0;
+}
+
+/**
+ * netdev_set_eeprom - write EEPROM data
+ * @dev: Network device.
+ * @eeprom: Ethtool EEPROM data structure.
+ * @data: Data buffer.
+ *
+ * This function modifies the EEPROM data one byte at a time.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ u16 eeprom_word[EEPROM_SIZE];
+ u8 *eeprom_byte = (u8 *) eeprom_word;
+ int i;
+ int len;
+
+ if (eeprom->magic != EEPROM_MAGIC)
+ return 1;
+
+ len = (eeprom->offset + eeprom->len + 1) / 2;
+ for (i = eeprom->offset / 2; i < len; i++)
+ eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
+ memcpy(eeprom_word, eeprom_data, EEPROM_SIZE * 2);
+ memcpy(&eeprom_byte[eeprom->offset], data, eeprom->len);
+ for (i = 0; i < EEPROM_SIZE; i++)
+ if (eeprom_word[i] != eeprom_data[i]) {
+ eeprom_data[i] = eeprom_word[i];
+ eeprom_write(&hw_priv->hw, i, eeprom_data[i]);
+ }
+
+ return 0;
+}
+
+/**
+ * netdev_get_pauseparam - get flow control parameters
+ * @dev: Network device.
+ * @pause: Ethtool PAUSE settings data structure.
+ *
+ * This procedure returns the PAUSE control flow settings.
+ */
+static void netdev_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ pause->autoneg = (hw->overrides & PAUSE_FLOW_CTRL) ? 0 : 1;
+ if (!hw->ksz_switch) {
+ pause->rx_pause =
+ (hw->rx_cfg & DMA_RX_FLOW_ENABLE) ? 1 : 0;
+ pause->tx_pause =
+ (hw->tx_cfg & DMA_TX_FLOW_ENABLE) ? 1 : 0;
+ } else {
+ pause->rx_pause =
+ (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_RX_FLOW_CTRL)) ? 1 : 0;
+ pause->tx_pause =
+ (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_TX_FLOW_CTRL)) ? 1 : 0;
+ }
+}
+
+/**
+ * netdev_set_pauseparam - set flow control parameters
+ * @dev: Network device.
+ * @pause: Ethtool PAUSE settings data structure.
+ *
+ * This function sets the PAUSE control flow settings.
+ * Not implemented yet.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+
+ mutex_lock(&hw_priv->lock);
+ if (pause->autoneg) {
+ if (!pause->rx_pause && !pause->tx_pause)
+ port->flow_ctrl = PHY_NO_FLOW_CTRL;
+ else
+ port->flow_ctrl = PHY_FLOW_CTRL;
+ hw->overrides &= ~PAUSE_FLOW_CTRL;
+ port->force_link = 0;
+ if (hw->ksz_switch) {
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_RX_FLOW_CTRL, 1);
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_TX_FLOW_CTRL, 1);
+ }
+ port_set_link_speed(port);
+ } else {
+ hw->overrides |= PAUSE_FLOW_CTRL;
+ if (hw->ksz_switch) {
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_RX_FLOW_CTRL, pause->rx_pause);
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_TX_FLOW_CTRL, pause->tx_pause);
+ } else
+ set_flow_ctrl(hw, pause->rx_pause, pause->tx_pause);
+ }
+ mutex_unlock(&hw_priv->lock);
+
+ return 0;
+}
+
+/**
+ * netdev_get_ringparam - get tx/rx ring parameters
+ * @dev: Network device.
+ * @ring: Ethtool RING settings data structure.
+ *
+ * This procedure returns the TX/RX ring settings.
+ */
+static void netdev_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ ring->tx_max_pending = (1 << 9);
+ ring->tx_pending = hw->tx_desc_info.alloc;
+ ring->rx_max_pending = (1 << 9);
+ ring->rx_pending = hw->rx_desc_info.alloc;
+}
+
+#define STATS_LEN (TOTAL_PORT_COUNTER_NUM)
+
+static struct {
+ char string[ETH_GSTRING_LEN];
+} ethtool_stats_keys[STATS_LEN] = {
+ { "rx_lo_priority_octets" },
+ { "rx_hi_priority_octets" },
+ { "rx_undersize_packets" },
+ { "rx_fragments" },
+ { "rx_oversize_packets" },
+ { "rx_jabbers" },
+ { "rx_symbol_errors" },
+ { "rx_crc_errors" },
+ { "rx_align_errors" },
+ { "rx_mac_ctrl_packets" },
+ { "rx_pause_packets" },
+ { "rx_bcast_packets" },
+ { "rx_mcast_packets" },
+ { "rx_ucast_packets" },
+ { "rx_64_or_less_octet_packets" },
+ { "rx_65_to_127_octet_packets" },
+ { "rx_128_to_255_octet_packets" },
+ { "rx_256_to_511_octet_packets" },
+ { "rx_512_to_1023_octet_packets" },
+ { "rx_1024_to_1522_octet_packets" },
+
+ { "tx_lo_priority_octets" },
+ { "tx_hi_priority_octets" },
+ { "tx_late_collisions" },
+ { "tx_pause_packets" },
+ { "tx_bcast_packets" },
+ { "tx_mcast_packets" },
+ { "tx_ucast_packets" },
+ { "tx_deferred" },
+ { "tx_total_collisions" },
+ { "tx_excessive_collisions" },
+ { "tx_single_collisions" },
+ { "tx_mult_collisions" },
+
+ { "rx_discards" },
+ { "tx_discards" },
+};
+
+/**
+ * netdev_get_strings - get statistics identity strings
+ * @dev: Network device.
+ * @stringset: String set identifier.
+ * @buf: Buffer to store the strings.
+ *
+ * This procedure returns the strings used to identify the statistics.
+ */
+static void netdev_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ if (ETH_SS_STATS == stringset)
+ memcpy(buf, &ethtool_stats_keys,
+ ETH_GSTRING_LEN * hw->mib_cnt);
+}
+
+/**
+ * netdev_get_sset_count - get statistics size
+ * @dev: Network device.
+ * @sset: The statistics set number.
+ *
+ * This function returns the size of the statistics to be reported.
+ *
+ * Return size of the statistics to be reported.
+ */
+static int netdev_get_sset_count(struct net_device *dev, int sset)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return hw->mib_cnt;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * netdev_get_ethtool_stats - get network device statistics
+ * @dev: Network device.
+ * @stats: Ethtool statistics data structure.
+ * @data: Buffer to store the statistics.
+ *
+ * This procedure returns the statistics.
+ */
+static void netdev_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+ int n_stats = stats->n_stats;
+ int i;
+ int n;
+ int p;
+ int rc;
+ u64 counter[TOTAL_PORT_COUNTER_NUM];
+
+ mutex_lock(&hw_priv->lock);
+ n = SWITCH_PORT_NUM;
+ for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
+ if (media_connected == hw->port_mib[p].state) {
+ hw_priv->counter[p].read = 1;
+
+ /* Remember first port that requests read. */
+ if (n == SWITCH_PORT_NUM)
+ n = p;
+ }
+ }
+ mutex_unlock(&hw_priv->lock);
+
+ if (n < SWITCH_PORT_NUM)
+ schedule_work(&hw_priv->mib_read);
+
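+ /* counter[p].read goes 1 (requested) -> 2 (done) in mib_read_work(). */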
+ if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) {
+ p = n;
+ rc = wait_event_interruptible_timeout(
+ hw_priv->counter[p].counter,
+ 2 == hw_priv->counter[p].read,
+ HZ * 1);
+ } else
+ for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) {
+ if (0 == i) {
+ rc = wait_event_interruptible_timeout(
+ hw_priv->counter[p].counter,
+ 2 == hw_priv->counter[p].read,
+ HZ * 2);
+ } else if (hw->port_mib[p].cnt_ptr) {
+ rc = wait_event_interruptible_timeout(
+ hw_priv->counter[p].counter,
+ 2 == hw_priv->counter[p].read,
+ HZ * 1);
+ }
+ }
+
+ get_mib_counters(hw, port->first_port, port->mib_port_cnt, counter);
+ n = hw->mib_cnt;
+ if (n > n_stats)
+ n = n_stats;
+ n_stats -= n;
+ for (i = 0; i < n; i++)
+ *data++ = counter[i];
+}
+
+/**
+ * netdev_get_rx_csum - get receive checksum support
+ * @dev: Network device.
+ *
+ * This function gets the receive checksum support setting.
+ *
+ * Return true if receive checksum is enabled; false otherwise.
+ */
+static u32 netdev_get_rx_csum(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ return hw->rx_cfg &
+ (DMA_RX_CSUM_UDP |
+ DMA_RX_CSUM_TCP |
+ DMA_RX_CSUM_IP);
+}
+
+/**
+ * netdev_set_rx_csum - set receive checksum support
+ * @dev: Network device.
+ * @data: Zero to disable receive checksum support.
+ *
+ * This function sets the receive checksum support setting.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_rx_csum(struct net_device *dev, u32 data)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ u32 new_setting = hw->rx_cfg;
+
+ if (data)
+ new_setting |=
+ (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP |
+ DMA_RX_CSUM_IP);
+ else
+ new_setting &=
+ ~(DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP |
+ DMA_RX_CSUM_IP);
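+ /* UDP Rx checksum offload is always left disabled. */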
+ new_setting &= ~DMA_RX_CSUM_UDP;
+ mutex_lock(&hw_priv->lock);
+ if (new_setting != hw->rx_cfg) {
+ hw->rx_cfg = new_setting;
+ if (hw->enabled)
+ writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
+ }
+ mutex_unlock(&hw_priv->lock);
+ return 0;
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_settings = netdev_get_settings,
+ .set_settings = netdev_set_settings,
+ .nway_reset = netdev_nway_reset,
+ .get_link = netdev_get_link,
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_regs_len = netdev_get_regs_len,
+ .get_regs = netdev_get_regs,
+ .get_wol = netdev_get_wol,
+ .set_wol = netdev_set_wol,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+ .get_eeprom_len = netdev_get_eeprom_len,
+ .get_eeprom = netdev_get_eeprom,
+ .set_eeprom = netdev_set_eeprom,
+ .get_pauseparam = netdev_get_pauseparam,
+ .set_pauseparam = netdev_set_pauseparam,
+ .get_ringparam = netdev_get_ringparam,
+ .get_strings = netdev_get_strings,
+ .get_sset_count = netdev_get_sset_count,
+ .get_ethtool_stats = netdev_get_ethtool_stats,
+ .get_rx_csum = netdev_get_rx_csum,
+ .set_rx_csum = netdev_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+};
+
+/*
+ * Hardware monitoring
+ */
+
+static void update_link(struct net_device *dev, struct dev_priv *priv,
+ struct ksz_port *port)
+{
+ if (priv->media_state != port->linked->state) {
+ priv->media_state = port->linked->state;
+ if (netif_running(dev)) {
+ if (media_connected == priv->media_state)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ if (netif_msg_link(priv))
+ printk(KERN_INFO "%s link %s\n", dev->name,
+ (media_connected == priv->media_state ?
+ "on" : "off"));
+ }
+ }
+}
+
+static void mib_read_work(struct work_struct *work)
+{
+ struct dev_info *hw_priv =
+ container_of(work, struct dev_info, mib_read);
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port_mib *mib;
+ int i;
+
+ next_jiffies = jiffies;
+ for (i = 0; i < hw->mib_port_cnt; i++) {
+ mib = &hw->port_mib[i];
+
+ /* Reading MIB counters or requested to read. */
+ if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) {
+
+ /* Need to process receive interrupt. */
+ if (port_r_cnt(hw, i))
+ break;
+ hw_priv->counter[i].read = 0;
+
+ /* Finish reading counters. */
+ if (0 == mib->cnt_ptr) {
+ hw_priv->counter[i].read = 2;
+ wake_up_interruptible(
+ &hw_priv->counter[i].counter);
+ }
+ } else if (jiffies >= hw_priv->counter[i].time) {
+ /* Only read MIB counters when the port is connected. */
+ if (media_connected == mib->state)
+ hw_priv->counter[i].read = 1;
+ next_jiffies += HZ * 1 * hw->mib_port_cnt;
+ hw_priv->counter[i].time = next_jiffies;
+
+ /* Port is just disconnected. */
+ } else if (mib->link_down) {
+ mib->link_down = 0;
+
+ /* Read counters one last time after link is lost. */
+ hw_priv->counter[i].read = 1;
+ }
+ }
+}
+
+static void mib_monitor(unsigned long ptr)
+{
+ struct dev_info *hw_priv = (struct dev_info *) ptr;
+
+ mib_read_work(&hw_priv->mib_read);
+
+ /* This is used to verify Wake-on-LAN is working. */
+ if (hw_priv->pme_wait) {
+ if (hw_priv->pme_wait <= jiffies) {
+ hw_clr_wol_pme_status(&hw_priv->hw);
+ hw_priv->pme_wait = 0;
+ }
+ } else if (hw_chk_wol_pme_status(&hw_priv->hw)) {
+
+ /* PME is asserted. Wait 2 seconds to clear it. */
+ hw_priv->pme_wait = jiffies + HZ * 2;
+ }
+
+ ksz_update_timer(&hw_priv->mib_timer_info);
+}
+
+/**
+ * dev_monitor - periodic monitoring
+ * @ptr: Network device pointer.
+ *
+ * This routine is run in a kernel timer to monitor the network device.
+ */
+static void dev_monitor(unsigned long ptr)
+{
+ struct net_device *dev = (struct net_device *) ptr;
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+
+ if (!(hw->features & LINK_INT_WORKING))
+ port_get_link_speed(port);
+ update_link(dev, priv, port);
+
+ ksz_update_timer(&priv->monitor_timer_info);
+}
+
+/*
+ * Linux network device interface functions
+ */
+
+/* Driver exported variables */
+
+static int msg_enable;
+
+static char *macaddr = ":";
+static char *mac1addr = ":";
+
+/*
+ * This enables multiple network device mode for KSZ8842, which contains a
+ * switch with two physical ports. Some users like to take control of the
+ * ports for running Spanning Tree Protocol. The driver will create an
+ * additional eth? device for the other port.
+ *
+ * One limitation is that the network devices cannot have different MTU or
+ * multicast hash table settings.
+ */
+static int multi_dev;
+
+/*
+ * As most users select multiple network device mode to use Spanning Tree
+ * Protocol, this enables a feature in which most unicast and multicast packets
+ * are forwarded inside the switch and not passed to the host. Only packets
+ * that need the host's attention are passed to it. This prevents the host
+ * from wasting CPU time examining each and every incoming packet and doing
+ * the forwarding itself.
+ *
+ * As the hack requires the private bridge header, the driver cannot compile
+ * with just the kernel headers.
+ *
+ * Enabling STP support also turns on multiple network device mode.
+ */
+static int stp;
+
+/*
+ * This enables fast aging in the KSZ8842 switch. It is not clear which
+ * situations require it; however, fast aging is used to flush the dynamic
+ * MAC table when STP support is enabled.
+ */
+static int fast_aging;
+
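+/*
+ * Typical usage (illustrative example): loading the module with
+ * "multi_dev=1 stp=1 fast_aging=1" creates one network device per switch
+ * port and keeps most forwarding inside the switch for STP.
+ */
+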
+/**
+ * netdev_init - initialize network device.
+ * @dev: Network device.
+ *
+ * This function initializes the network device.
+ *
+ * Return 0 if successful; otherwise an error code indicating failure.
+ */
+static int __init netdev_init(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ /* 500 ms timeout */
+ ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000,
+ dev_monitor, dev);
+
+ /* 500 ms timeout */
+ dev->watchdog_timeo = HZ / 2;
+
+ dev->features |= NETIF_F_IP_CSUM;
+
+ /*
+ * The hardware does not really support IPv6 checksum generation, but the
+ * driver actually runs faster with it enabled. Refer to IPV6_CSUM_GEN_HACK.
+ */
+ dev->features |= NETIF_F_IPV6_CSUM;
+ dev->features |= NETIF_F_SG;
+
+ sema_init(&priv->proc_sem, 1);
+
+ priv->mii_if.phy_id_mask = 0x1;
+ priv->mii_if.reg_num_mask = 0x7;
+ priv->mii_if.dev = dev;
+ priv->mii_if.mdio_read = mdio_read;
+ priv->mii_if.mdio_write = mdio_write;
+ priv->mii_if.phy_id = priv->port.first_port + 1;
+
+ priv->msg_enable = netif_msg_init(msg_enable,
+ (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK));
+
+ return 0;
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_init = netdev_init,
+ .ndo_open = netdev_open,
+ .ndo_stop = netdev_close,
+ .ndo_get_stats = netdev_query_statistics,
+ .ndo_start_xmit = netdev_tx,
+ .ndo_tx_timeout = netdev_tx_timeout,
+ .ndo_change_mtu = netdev_change_mtu,
+ .ndo_set_mac_address = netdev_set_mac_address,
+ .ndo_do_ioctl = netdev_ioctl,
+ .ndo_set_rx_mode = netdev_set_rx_mode,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = netdev_netpoll,
+#endif
+};
+
+static void netdev_free(struct net_device *dev)
+{
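+ /* watchdog_timeo is set in netdev_init(), so it marks a registered device. */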
+ if (dev->watchdog_timeo)
+ unregister_netdev(dev);
+
+ free_netdev(dev);
+}
+
+struct platform_info {
+ struct dev_info dev_info;
+ struct net_device *netdev[SWITCH_PORT_NUM];
+};
+
+static int net_device_present;
+
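+/*
+ * Parse a colon-separated hex MAC address string (the macaddr/mac1addr module
+ * parameters) into the MAC address override for the given port.
+ */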
+static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
+{
+ int i;
+ int j;
+ int got_num;
+ int num;
+
+ i = j = num = got_num = 0;
+ while (j < MAC_ADDR_LEN) {
+ if (macaddr[i]) {
+ got_num = 1;
+ if ('0' <= macaddr[i] && macaddr[i] <= '9')
+ num = num * 16 + macaddr[i] - '0';
+ else if ('A' <= macaddr[i] && macaddr[i] <= 'F')
+ num = num * 16 + 10 + macaddr[i] - 'A';
+ else if ('a' <= macaddr[i] && macaddr[i] <= 'f')
+ num = num * 16 + 10 + macaddr[i] - 'a';
+ else if (':' == macaddr[i])
+ got_num = 2;
+ else
+ break;
+ } else if (got_num)
+ got_num = 2;
+ else
+ break;
+ if (2 == got_num) {
+ if (MAIN_PORT == port) {
+ hw_priv->hw.override_addr[j++] = (u8) num;
+ hw_priv->hw.override_addr[5] +=
+ hw_priv->hw.id;
+ } else {
+ hw_priv->hw.ksz_switch->other_addr[j++] =
+ (u8) num;
+ hw_priv->hw.ksz_switch->other_addr[5] +=
+ hw_priv->hw.id;
+ }
+ num = got_num = 0;
+ }
+ i++;
+ }
+ if (MAC_ADDR_LEN == j) {
+ if (MAIN_PORT == port)
+ hw_priv->hw.mac_override = 1;
+ }
+}
+
+#define KS884X_DMA_MASK (~0x0UL)
+
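+/* Read the second port's MAC address from the EEPROM, if one is programmed. */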
+static void read_other_addr(struct ksz_hw *hw)
+{
+ int i;
+ u16 data[3];
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ for (i = 0; i < 3; i++)
+ data[i] = eeprom_read(hw, i + EEPROM_DATA_OTHER_MAC_ADDR);
+ if ((data[0] || data[1] || data[2]) && data[0] != 0xffff) {
+ sw->other_addr[5] = (u8) data[0];
+ sw->other_addr[4] = (u8)(data[0] >> 8);
+ sw->other_addr[3] = (u8) data[1];
+ sw->other_addr[2] = (u8)(data[1] >> 8);
+ sw->other_addr[1] = (u8) data[2];
+ sw->other_addr[0] = (u8)(data[2] >> 8);
+ }
+}
+
+#ifndef PCI_VENDOR_ID_MICREL_KS
+#define PCI_VENDOR_ID_MICREL_KS 0x16c6
+#endif
+
+static int __init pcidev_init(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct net_device *dev;
+ struct dev_priv *priv;
+ struct dev_info *hw_priv;
+ struct ksz_hw *hw;
+ struct platform_info *info;
+ struct ksz_port *port;
+ unsigned long reg_base;
+ unsigned long reg_len;
+ int cnt;
+ int i;
+ int mib_port_count;
+ int pi;
+ int port_count;
+ int result;
+ char banner[80];
+ struct ksz_switch *sw = NULL;
+
+ result = pci_enable_device(pdev);
+ if (result)
+ return result;
+
+ result = -ENODEV;
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+ return result;
+
+ reg_base = pci_resource_start(pdev, 0);
+ reg_len = pci_resource_len(pdev, 0);
+ if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0)
+ return result;
+
+ if (!request_mem_region(reg_base, reg_len, DRV_NAME))
+ return result;
+ pci_set_master(pdev);
+
+ result = -ENOMEM;
+
+ info = kmalloc(sizeof(struct platform_info), GFP_KERNEL);
+ if (!info)
+ goto pcidev_init_dev_err;
+ memset(info, 0, sizeof(struct platform_info));
+
+ hw_priv = &info->dev_info;
+ hw_priv->pdev = pdev;
+
+ hw = &hw_priv->hw;
+
+ hw->io = ioremap(reg_base, reg_len);
+ if (!hw->io)
+ goto pcidev_init_io_err;
+
+ cnt = hw_init(hw);
+ if (!cnt) {
+ if (msg_enable & NETIF_MSG_PROBE)
+ printk(KERN_ALERT "chip not detected\n");
+ result = -ENODEV;
+ goto pcidev_init_alloc_err;
+ }
+
+ sprintf(banner, "%s\n", version);
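+ /* hw_init() returned 1 or 2; patch that digit into the banner string. */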
+ banner[13] = cnt + '0';
+ ks_info(hw_priv, "%s", banner);
+ ks_dbg(hw_priv, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
+
+ /* Assume device is KSZ8841. */
+ hw->dev_count = 1;
+ port_count = 1;
+ mib_port_count = 1;
+ hw->addr_list_size = 0;
+ hw->mib_cnt = PORT_COUNTER_NUM;
+ hw->mib_port_cnt = 1;
+
+ /* KSZ8842 has a switch with multiple ports. */
+ if (2 == cnt) {
+ if (fast_aging)
+ hw->overrides |= FAST_AGING;
+
+ hw->mib_cnt = TOTAL_PORT_COUNTER_NUM;
+
+ /* Multiple network device interfaces are required. */
+ if (multi_dev) {
+ hw->dev_count = SWITCH_PORT_NUM;
+ hw->addr_list_size = SWITCH_PORT_NUM - 1;
+ }
+
+ /* Single network device has multiple ports. */
+ if (1 == hw->dev_count) {
+ port_count = SWITCH_PORT_NUM;
+ mib_port_count = SWITCH_PORT_NUM;
+ }
+ hw->mib_port_cnt = TOTAL_PORT_NUM;
+ hw->ksz_switch = kmalloc(sizeof(struct ksz_switch), GFP_KERNEL);
+ if (!hw->ksz_switch)
+ goto pcidev_init_alloc_err;
+ memset(hw->ksz_switch, 0, sizeof(struct ksz_switch));
+
+ sw = hw->ksz_switch;
+ }
+ for (i = 0; i < hw->mib_port_cnt; i++)
+ hw->port_mib[i].mib_start = 0;
+
+ hw->parent = hw_priv;
+
+ /* Default MTU is 1500. */
+ hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3;
+
+ if (ksz_alloc_mem(hw_priv))
+ goto pcidev_init_mem_err;
+
+ hw_priv->hw.id = net_device_present;
+
+ spin_lock_init(&hw_priv->hwlock);
+ mutex_init(&hw_priv->lock);
+
+ /* tasklet is enabled. */
+ tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
+ (unsigned long) hw_priv);
+ tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
+ (unsigned long) hw_priv);
+
+ /* tasklet_enable will decrement the atomic counter. */
+ tasklet_disable(&hw_priv->rx_tasklet);
+ tasklet_disable(&hw_priv->tx_tasklet);
+
+ for (i = 0; i < TOTAL_PORT_NUM; i++)
+ init_waitqueue_head(&hw_priv->counter[i].counter);
+
+ if (macaddr[0] != ':')
+ get_mac_addr(hw_priv, macaddr, MAIN_PORT);
+
+ /* Read MAC address and initialize override address if not overridden. */
+ hw_read_addr(hw);
+
+ /* Multiple device interfaces mode requires a second MAC address. */
+ if (hw->dev_count > 1) {
+ memcpy(sw->other_addr, hw->override_addr, MAC_ADDR_LEN);
+ read_other_addr(hw);
+ if (mac1addr[0] != ':')
+ get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
+ }
+
+ hw_setup(hw);
+ if (hw->ksz_switch)
+ sw_setup(hw);
+ else {
+ hw_priv->wol_support = WOL_SUPPORT;
+ hw_priv->wol_enable = 0;
+ }
+
+ INIT_WORK(&hw_priv->mib_read, mib_read_work);
+
+ /* 500 ms timeout */
+ ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000,
+ mib_monitor, hw_priv);
+
+ for (i = 0; i < hw->dev_count; i++) {
+ dev = alloc_etherdev(sizeof(struct dev_priv));
+ if (!dev)
+ goto pcidev_init_reg_err;
+ info->netdev[i] = dev;
+
+ priv = netdev_priv(dev);
+ priv->adapter = hw_priv;
+ priv->id = net_device_present++;
+
+ port = &priv->port;
+ port->port_cnt = port_count;
+ port->mib_port_cnt = mib_port_count;
+ port->first_port = i;
+ port->flow_ctrl = PHY_FLOW_CTRL;
+
+ port->hw = hw;
+ port->linked = &hw->port_info[port->first_port];
+
+ for (cnt = 0, pi = i; cnt < port_count; cnt++, pi++) {
+ hw->port_info[pi].port_id = pi;
+ hw->port_info[pi].pdev = dev;
+ hw->port_info[pi].state = media_disconnected;
+ }
+
+ dev->mem_start = (unsigned long) hw->io;
+ dev->mem_end = dev->mem_start + reg_len - 1;
+ dev->irq = pdev->irq;
+ if (MAIN_PORT == i)
+ memcpy(dev->dev_addr, hw_priv->hw.override_addr,
+ MAC_ADDR_LEN);
+ else {
+ memcpy(dev->dev_addr, sw->other_addr,
+ MAC_ADDR_LEN);
+ if (!memcmp(sw->other_addr, hw->override_addr,
+ MAC_ADDR_LEN))
+ dev->dev_addr[5] += port->first_port;
+ }
+
+ dev->netdev_ops = &netdev_ops;
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ if (register_netdev(dev))
+ goto pcidev_init_reg_err;
+ port_set_power_saving(port, true);
+ }
+
+ pci_dev_get(hw_priv->pdev);
+ pci_set_drvdata(pdev, info);
+ return 0;
+
+pcidev_init_reg_err:
+ for (i = 0; i < hw->dev_count; i++) {
+ if (info->netdev[i]) {
+ netdev_free(info->netdev[i]);
+ info->netdev[i] = NULL;
+ }
+ }
+
+pcidev_init_mem_err:
+ ksz_free_mem(hw_priv);
+ kfree(hw->ksz_switch);
+
+pcidev_init_alloc_err:
+ iounmap(hw->io);
+
+pcidev_init_io_err:
+ kfree(info);
+
+pcidev_init_dev_err:
+ release_mem_region(reg_base, reg_len);
+
+ return result;
+}
+
+static void pcidev_exit(struct pci_dev *pdev)
+{
+ int i;
+ struct platform_info *info = pci_get_drvdata(pdev);
+ struct dev_info *hw_priv = &info->dev_info;
+
+ pci_set_drvdata(pdev, NULL);
+
+ release_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ for (i = 0; i < hw_priv->hw.dev_count; i++) {
+ if (info->netdev[i])
+ netdev_free(info->netdev[i]);
+ }
+ if (hw_priv->hw.io)
+ iounmap(hw_priv->hw.io);
+ ksz_free_mem(hw_priv);
+ kfree(hw_priv->hw.ksz_switch);
+ pci_dev_put(hw_priv->pdev);
+ kfree(info);
+}
+
+#ifdef CONFIG_PM
+static int pcidev_resume(struct pci_dev *pdev)
+{
+ int i;
+ struct platform_info *info = pci_get_drvdata(pdev);
+ struct dev_info *hw_priv = &info->dev_info;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_wake(pdev, PCI_D0, 0);
+
+ if (hw_priv->wol_enable)
+ hw_cfg_wol_pme(hw, 0);
+ for (i = 0; i < hw->dev_count; i++) {
+ if (info->netdev[i]) {
+ struct net_device *dev = info->netdev[i];
+
+ if (netif_running(dev)) {
+ netdev_open(dev);
+ netif_device_attach(dev);
+ }
+ }
+ }
+ return 0;
+}
+
+static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int i;
+ struct platform_info *info = pci_get_drvdata(pdev);
+ struct dev_info *hw_priv = &info->dev_info;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ /* Need to find a way to retrieve the device IP address. */
+ u8 net_addr[] = { 192, 168, 1, 1 };
+
+ for (i = 0; i < hw->dev_count; i++) {
+ if (info->netdev[i]) {
+ struct net_device *dev = info->netdev[i];
+
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ netdev_close(dev);
+ }
+ }
+ }
+ if (hw_priv->wol_enable) {
+ hw_enable_wol(hw, hw_priv->wol_enable, net_addr);
+ hw_cfg_wol_pme(hw, 1);
+ }
+
+ pci_save_state(pdev);
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+#endif
+
+static char pcidev_name[] = "ksz884xp";
+
+static struct pci_device_id pcidev_table[] = {
+ { PCI_VENDOR_ID_MICREL_KS, 0x8841,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_MICREL_KS, 0x8842,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, pcidev_table);
+
+static struct pci_driver pci_device_driver = {
+#ifdef CONFIG_PM
+ .suspend = pcidev_suspend,
+ .resume = pcidev_resume,
+#endif
+ .name = pcidev_name,
+ .id_table = pcidev_table,
+ .probe = pcidev_init,
+ .remove = pcidev_exit
+};
+
+static int __init ksz884x_init_module(void)
+{
+ return pci_register_driver(&pci_device_driver);
+}
+
+static void __exit ksz884x_cleanup_module(void)
+{
+ pci_unregister_driver(&pci_device_driver);
+}
+
+module_init(ksz884x_init_module);
+module_exit(ksz884x_cleanup_module);
+
+MODULE_DESCRIPTION("KSZ8841/2 PCI network driver");
+MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>");
+MODULE_LICENSE("GPL");
+
+module_param_named(message, msg_enable, int, 0);
+MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
+
+module_param(macaddr, charp, 0);
+module_param(mac1addr, charp, 0);
+module_param(fast_aging, int, 0);
+module_param(multi_dev, int, 0);
+module_param(stp, int, 0);
+MODULE_PARM_DESC(macaddr, "MAC address");
+MODULE_PARM_DESC(mac1addr, "Second MAC address");
+MODULE_PARM_DESC(fast_aging, "Fast aging");
+MODULE_PARM_DESC(multi_dev, "Multiple device interfaces");
+MODULE_PARM_DESC(stp, "STP support");
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 8d7d3d4625f6..7b9447646f8a 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -1288,7 +1288,7 @@ static void set_multicast_list(struct net_device *dev)
} else {
short multicast_table[4];
int i;
- int num_addrs=dev->mc_count;
+ int num_addrs=netdev_mc_count(dev);
if(dev->flags&IFF_ALLMULTI)
num_addrs=1;
/* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index b117f7f8b194..371b58b1d151 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -1094,11 +1094,9 @@ static int __devinit i82596_probe(struct net_device *dev)
return i;
};
- DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,",
- dev->name, dev->base_addr));
- for (i = 0; i < 6; i++)
- DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
- DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
+ DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
+ dev->name, dev->base_addr, dev->dev_addr,
+ dev->irq));
DEB(DEB_INIT, printk(KERN_INFO
"%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
dev->name, dma, (int)sizeof(struct i596_dma),
@@ -1382,21 +1380,21 @@ static void set_multicast_list(struct net_device *dev)
}
}
- cnt = dev->mc_count;
+ cnt = netdev_mc_count(dev);
if (cnt > MAX_MC_CNT) {
cnt = MAX_MC_CNT;
printk(KERN_NOTICE "%s: Only %d multicast addresses supported",
dev->name, cnt);
}
- if (dev->mc_count > 0) {
+ if (!netdev_mc_empty(dev)) {
struct dev_mc_list *dmi;
unsigned char *cp;
struct mc_cmd *cmd;
cmd = &dma->mc_cmd;
cmd->cmd.command = SWAP16(CmdMulticastList);
- cmd->mc_cnt = SWAP16(dev->mc_count * 6);
+ cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6);
cp = cmd->mc_addrs;
for (dmi = dev->mc_list;
cnt && dmi != NULL;
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index a8522bd73ae7..8442c47e93e8 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -232,7 +232,7 @@ static void temac_set_multicast_list(struct net_device *ndev)
mutex_lock(&lp->indirect_mutex);
if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
- ndev->mc_count > MULTICAST_CAM_TABLE_NUM) {
+ netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
/*
* We must make the kernel realise we had to move
* into promisc mode or we start all out war on
@@ -242,9 +242,9 @@ static void temac_set_multicast_list(struct net_device *ndev)
ndev->flags |= IFF_PROMISC;
temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
- } else if (ndev->mc_count) {
+ } else if (!netdev_mc_empty(ndev)) {
struct dev_mc_list *mclist = ndev->mc_list;
- for (i = 0; mclist && i < ndev->mc_count; i++) {
+ for (i = 0; mclist && i < netdev_mc_count(ndev); i++) {
if (i >= MULTICAST_CAM_TABLE_NUM)
break;
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index e20fefc73c8b..b1f5d79af61f 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -1253,18 +1253,19 @@ static void set_multicast_list(struct net_device *dev) {
if (i596_debug > 1)
printk ("%s: set multicast list %d\n",
- dev->name, dev->mc_count);
+ dev->name, netdev_mc_count(dev));
- if (dev->mc_count > 0) {
+ if (!netdev_mc_empty(dev)) {
struct dev_mc_list *dmi;
char *cp;
- cmd = kmalloc(sizeof(struct i596_cmd)+2+dev->mc_count*6, GFP_ATOMIC);
+ cmd = kmalloc(sizeof(struct i596_cmd) + 2 +
+ netdev_mc_count(dev) * 6, GFP_ATOMIC);
if (cmd == NULL) {
printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name);
return;
}
cmd->command = CmdMulticastList;
- *((unsigned short *) (cmd + 1)) = dev->mc_count * 6;
+ *((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6;
cp = ((char *)(cmd + 1))+2;
for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
memcpy(cp, dmi,6);
@@ -1277,7 +1278,8 @@ static void set_multicast_list(struct net_device *dev) {
if (lp->set_conf.pa_next != I596_NULL) {
return;
}
- if (dev->mc_count == 0 && !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
+ if (netdev_mc_empty(dev) &&
+ !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
lp->i596_config[8] &= ~0x01;
} else {
lp->i596_config[8] |= 0x01;
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index f8fa0c3f0f64..a8768672dc5a 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -17,6 +17,8 @@
/* 2002-12-30: Try to support more cards, some clues from NetBSD driver */
/* 2003-12-26: Make sure Asante cards always work. */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -34,31 +36,36 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
+#include <linux/io.h>
#include <asm/system.h>
-#include <asm/io.h>
#include <asm/dma.h>
#include <asm/hwtest.h>
#include <asm/macints.h>
static char version[] =
- "mac8390.c: v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n";
+ "v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n";
#define EI_SHIFT(x) (ei_local->reg_offset[x])
-#define ei_inb(port) in_8(port)
-#define ei_outb(val,port) out_8(port,val)
-#define ei_inb_p(port) in_8(port)
-#define ei_outb_p(val,port) out_8(port,val)
+#define ei_inb(port) in_8(port)
+#define ei_outb(val, port) out_8(port, val)
+#define ei_inb_p(port) in_8(port)
+#define ei_outb_p(val, port) out_8(port, val)
#include "lib8390.c"
#define WD_START_PG 0x00 /* First page of TX buffer */
#define CABLETRON_RX_START_PG 0x00 /* First page of RX buffer */
#define CABLETRON_RX_STOP_PG 0x30 /* Last page +1 of RX ring */
-#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG /* First page of TX buffer */
+#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG
+ /* First page of TX buffer */
-/* Unfortunately it seems we have to hardcode these for the moment */
-/* Shouldn't the card know about this? Does anyone know where to read it off the card? Do we trust the data provided by the card? */
+/*
+ * Unfortunately it seems we have to hardcode these for the moment
+ * Shouldn't the card know about this?
+ * Does anyone know where to read it off the card?
+ * Do we trust the data provided by the card?
+ */
#define DAYNA_8390_BASE 0x80000
#define DAYNA_8390_MEM 0x00000
@@ -80,7 +87,7 @@ enum mac8390_type {
MAC8390_KINETICS,
};
-static const char * cardname[] = {
+static const char *cardname[] = {
"apple",
"asante",
"farallon",
@@ -90,7 +97,7 @@ static const char * cardname[] = {
"kinetics",
};
-static int word16[] = {
+static const int word16[] = {
1, /* apple */
1, /* asante */
1, /* farallon */
@@ -101,7 +108,7 @@ static int word16[] = {
};
/* on which cards do we use NuBus resources? */
-static int useresources[] = {
+static const int useresources[] = {
1, /* apple */
1, /* asante */
1, /* farallon */
@@ -117,22 +124,22 @@ enum mac8390_access {
ACCESS_16,
};
-extern int mac8390_memtest(struct net_device * dev);
-static int mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev,
+extern int mac8390_memtest(struct net_device *dev);
+static int mac8390_initdev(struct net_device *dev, struct nubus_dev *ndev,
enum mac8390_type type);
-static int mac8390_open(struct net_device * dev);
-static int mac8390_close(struct net_device * dev);
+static int mac8390_open(struct net_device *dev);
+static int mac8390_close(struct net_device *dev);
static void mac8390_no_reset(struct net_device *dev);
static void interlan_reset(struct net_device *dev);
/* Sane (32-bit chunk memory read/write) - Some Farallon and Apple do this*/
static void sane_get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr, int ring_page);
-static void sane_block_input(struct net_device * dev, int count,
- struct sk_buff * skb, int ring_offset);
-static void sane_block_output(struct net_device * dev, int count,
- const unsigned char * buf, const int start_page);
+static void sane_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void sane_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page);
/* dayna_memcpy to and from card */
static void dayna_memcpy_fromcard(struct net_device *dev, void *to,
@@ -148,8 +155,8 @@ static void dayna_block_input(struct net_device *dev, int count,
static void dayna_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
-#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
-#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
+#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
+#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
static void slow_sane_get_8390_hdr(struct net_device *dev,
@@ -164,70 +171,72 @@ static void word_memcpy_fromcard(void *tp, const void *fp, int count);
static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
{
switch (dev->dr_sw) {
- case NUBUS_DRSW_3COM:
- switch (dev->dr_hw) {
- case NUBUS_DRHW_APPLE_SONIC_NB:
- case NUBUS_DRHW_APPLE_SONIC_LC:
- case NUBUS_DRHW_SONNET:
- return MAC8390_NONE;
- break;
- default:
- return MAC8390_APPLE;
- break;
- }
+ case NUBUS_DRSW_3COM:
+ switch (dev->dr_hw) {
+ case NUBUS_DRHW_APPLE_SONIC_NB:
+ case NUBUS_DRHW_APPLE_SONIC_LC:
+ case NUBUS_DRHW_SONNET:
+ return MAC8390_NONE;
break;
-
- case NUBUS_DRSW_APPLE:
- switch (dev->dr_hw) {
- case NUBUS_DRHW_ASANTE_LC:
- return MAC8390_NONE;
- break;
- case NUBUS_DRHW_CABLETRON:
- return MAC8390_CABLETRON;
- break;
- default:
- return MAC8390_APPLE;
- break;
- }
+ default:
+ return MAC8390_APPLE;
break;
+ }
+ break;
- case NUBUS_DRSW_ASANTE:
- return MAC8390_ASANTE;
+ case NUBUS_DRSW_APPLE:
+ switch (dev->dr_hw) {
+ case NUBUS_DRHW_ASANTE_LC:
+ return MAC8390_NONE;
break;
-
- case NUBUS_DRSW_TECHWORKS:
- case NUBUS_DRSW_DAYNA2:
- case NUBUS_DRSW_DAYNA_LC:
- if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
- return MAC8390_CABLETRON;
- else
- return MAC8390_APPLE;
+ case NUBUS_DRHW_CABLETRON:
+ return MAC8390_CABLETRON;
break;
-
- case NUBUS_DRSW_FARALLON:
- return MAC8390_FARALLON;
+ default:
+ return MAC8390_APPLE;
break;
+ }
+ break;
- case NUBUS_DRSW_KINETICS:
- switch (dev->dr_hw) {
- case NUBUS_DRHW_INTERLAN:
- return MAC8390_INTERLAN;
- break;
- default:
- return MAC8390_KINETICS;
- break;
- }
- break;
+ case NUBUS_DRSW_ASANTE:
+ return MAC8390_ASANTE;
+ break;
- case NUBUS_DRSW_DAYNA:
- // These correspond to Dayna Sonic cards
- // which use the macsonic driver
- if (dev->dr_hw == NUBUS_DRHW_SMC9194 ||
- dev->dr_hw == NUBUS_DRHW_INTERLAN )
- return MAC8390_NONE;
- else
- return MAC8390_DAYNA;
+ case NUBUS_DRSW_TECHWORKS:
+ case NUBUS_DRSW_DAYNA2:
+ case NUBUS_DRSW_DAYNA_LC:
+ if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
+ return MAC8390_CABLETRON;
+ else
+ return MAC8390_APPLE;
+ break;
+
+ case NUBUS_DRSW_FARALLON:
+ return MAC8390_FARALLON;
+ break;
+
+ case NUBUS_DRSW_KINETICS:
+ switch (dev->dr_hw) {
+ case NUBUS_DRHW_INTERLAN:
+ return MAC8390_INTERLAN;
+ break;
+ default:
+ return MAC8390_KINETICS;
break;
+ }
+ break;
+
+ case NUBUS_DRSW_DAYNA:
+ /*
+ * These correspond to Dayna Sonic cards
+ * which use the macsonic driver
+ */
+ if (dev->dr_hw == NUBUS_DRHW_SMC9194 ||
+ dev->dr_hw == NUBUS_DRHW_INTERLAN)
+ return MAC8390_NONE;
+ else
+ return MAC8390_DAYNA;
+ break;
}
return MAC8390_NONE;
}
@@ -237,14 +246,14 @@ static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
unsigned long outdata = 0xA5A0B5B0;
unsigned long indata = 0x00000000;
/* Try writing 32 bits */
- memcpy((char *)membase, (char *)&outdata, 4);
+ memcpy(membase, &outdata, 4);
/* Now compare them */
if (memcmp((char *)&outdata, (char *)membase, 4) == 0)
return ACCESS_32;
/* Write 16 bit output */
- word_memcpy_tocard((char *)membase, (char *)&outdata, 4);
+ word_memcpy_tocard(membase, &outdata, 4);
/* Now read it back */
- word_memcpy_fromcard((char *)&indata, (char *)membase, 4);
+ word_memcpy_fromcard(&indata, membase, 4);
if (outdata == indata)
return ACCESS_16;
return ACCESS_UNKNOWN;
@@ -258,7 +267,7 @@ static int __init mac8390_memsize(unsigned long membase)
local_irq_save(flags);
/* Check up to 32K in 4K increments */
for (i = 0; i < 8; i++) {
- volatile unsigned short *m = (unsigned short *) (membase + (i * 0x1000));
+ volatile unsigned short *m = (unsigned short *)(membase + (i * 0x1000));
/* Unwriteable - we have a fully decoded card and the
RAM end located */
@@ -273,28 +282,127 @@ static int __init mac8390_memsize(unsigned long membase)
/* check for partial decode and wrap */
for (j = 0; j < i; j++) {
- volatile unsigned short *p = (unsigned short *) (membase + (j * 0x1000));
+ volatile unsigned short *p = (unsigned short *)(membase + (j * 0x1000));
if (*p != (0xA5A0 | j))
break;
- }
- }
+ }
+ }
local_irq_restore(flags);
- /* in any case, we stopped once we tried one block too many,
- or once we reached 32K */
- return i * 0x1000;
+ /*
+ * in any case, we stopped once we tried one block too many,
+ * or once we reached 32K
+ */
+ return i * 0x1000;
+}
+
+static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev,
+ enum mac8390_type cardtype)
+{
+ struct nubus_dir dir;
+ struct nubus_dirent ent;
+ int offset;
+ volatile unsigned short *i;
+
+ printk_once(KERN_INFO pr_fmt("%s"), version);
+
+ dev->irq = SLOT2IRQ(ndev->board->slot);
+ /* This is getting to be a habit */
+ dev->base_addr = (ndev->board->slot_addr |
+ ((ndev->board->slot & 0xf) << 20));
+
+ /*
+ * Get some Nubus info - we will trust the card's idea
+ * of where its memory and registers are.
+ */
+
+ if (nubus_get_func_dir(ndev, &dir) == -1) {
+ pr_err("%s: Unable to get Nubus functional directory for slot %X!\n",
+ dev->name, ndev->board->slot);
+ return false;
+ }
+
+ /* Get the MAC address */
+ if (nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent) == -1) {
+ pr_info("%s: Couldn't get MAC address!\n", dev->name);
+ return false;
+ }
+
+ nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
+
+ if (useresources[cardtype] == 1) {
+ nubus_rewinddir(&dir);
+ if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS,
+ &ent) == -1) {
+ pr_err("%s: Memory offset resource for slot %X not found!\n",
+ dev->name, ndev->board->slot);
+ return false;
+ }
+ nubus_get_rsrc_mem(&offset, &ent, 4);
+ dev->mem_start = dev->base_addr + offset;
+ /* yes, this is how the Apple driver does it */
+ dev->base_addr = dev->mem_start + 0x10000;
+ nubus_rewinddir(&dir);
+ if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH,
+ &ent) == -1) {
+ pr_info("%s: Memory length resource for slot %X not found, probing\n",
+ dev->name, ndev->board->slot);
+ offset = mac8390_memsize(dev->mem_start);
+ } else {
+ nubus_get_rsrc_mem(&offset, &ent, 4);
+ }
+ dev->mem_end = dev->mem_start + offset;
+ } else {
+ switch (cardtype) {
+ case MAC8390_KINETICS:
+ case MAC8390_DAYNA: /* it's the same */
+ dev->base_addr = (int)(ndev->board->slot_addr +
+ DAYNA_8390_BASE);
+ dev->mem_start = (int)(ndev->board->slot_addr +
+ DAYNA_8390_MEM);
+ dev->mem_end = dev->mem_start +
+ mac8390_memsize(dev->mem_start);
+ break;
+ case MAC8390_INTERLAN:
+ dev->base_addr = (int)(ndev->board->slot_addr +
+ INTERLAN_8390_BASE);
+ dev->mem_start = (int)(ndev->board->slot_addr +
+ INTERLAN_8390_MEM);
+ dev->mem_end = dev->mem_start +
+ mac8390_memsize(dev->mem_start);
+ break;
+ case MAC8390_CABLETRON:
+ dev->base_addr = (int)(ndev->board->slot_addr +
+ CABLETRON_8390_BASE);
+ dev->mem_start = (int)(ndev->board->slot_addr +
+ CABLETRON_8390_MEM);
+ /* The base address is unreadable if 0x00
+ * has been written to the command register
+ * Reset the chip by writing E8390_NODMA +
+ * E8390_PAGE0 + E8390_STOP just to be
+ * sure
+ */
+ i = (void *)dev->base_addr;
+ *i = 0x21;
+ dev->mem_end = dev->mem_start +
+ mac8390_memsize(dev->mem_start);
+ break;
+
+ default:
+ pr_err("Card type %s is unsupported, sorry\n",
+ ndev->board->name);
+ return false;
+ }
+ }
+
+ return true;
}
struct net_device * __init mac8390_probe(int unit)
{
struct net_device *dev;
- volatile unsigned short *i;
- int version_disp = 0;
- struct nubus_dev * ndev = NULL;
+ struct nubus_dev *ndev = NULL;
int err = -ENODEV;
- struct nubus_dir dir;
- struct nubus_dirent ent;
- int offset;
static unsigned int slots;
enum mac8390_type cardtype;
@@ -311,118 +419,19 @@ struct net_device * __init mac8390_probe(int unit)
if (unit >= 0)
sprintf(dev->name, "eth%d", unit);
- while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET, ndev))) {
+ while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET,
+ ndev))) {
/* Have we seen it already? */
- if (slots & (1<<ndev->board->slot))
+ if (slots & (1 << ndev->board->slot))
continue;
- slots |= 1<<ndev->board->slot;
+ slots |= 1 << ndev->board->slot;
- if ((cardtype = mac8390_ident(ndev)) == MAC8390_NONE)
+ cardtype = mac8390_ident(ndev);
+ if (cardtype == MAC8390_NONE)
continue;
- if (version_disp == 0) {
- version_disp = 1;
- printk(version);
- }
-
- dev->irq = SLOT2IRQ(ndev->board->slot);
- /* This is getting to be a habit */
- dev->base_addr = ndev->board->slot_addr | ((ndev->board->slot&0xf) << 20);
-
- /* Get some Nubus info - we will trust the card's idea
- of where its memory and registers are. */
-
- if (nubus_get_func_dir(ndev, &dir) == -1) {
- printk(KERN_ERR "%s: Unable to get Nubus functional"
- " directory for slot %X!\n",
- dev->name, ndev->board->slot);
+ if (!mac8390_init(dev, ndev, cardtype))
continue;
- }
-
- /* Get the MAC address */
- if ((nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent)) == -1) {
- printk(KERN_INFO "%s: Couldn't get MAC address!\n",
- dev->name);
- continue;
- } else {
- nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
- }
-
- if (useresources[cardtype] == 1) {
- nubus_rewinddir(&dir);
- if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS, &ent) == -1) {
- printk(KERN_ERR "%s: Memory offset resource"
- " for slot %X not found!\n",
- dev->name, ndev->board->slot);
- continue;
- }
- nubus_get_rsrc_mem(&offset, &ent, 4);
- dev->mem_start = dev->base_addr + offset;
- /* yes, this is how the Apple driver does it */
- dev->base_addr = dev->mem_start + 0x10000;
- nubus_rewinddir(&dir);
- if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH, &ent) == -1) {
- printk(KERN_INFO "%s: Memory length resource"
- " for slot %X not found"
- ", probing\n",
- dev->name, ndev->board->slot);
- offset = mac8390_memsize(dev->mem_start);
- } else {
- nubus_get_rsrc_mem(&offset, &ent, 4);
- }
- dev->mem_end = dev->mem_start + offset;
- } else {
- switch (cardtype) {
- case MAC8390_KINETICS:
- case MAC8390_DAYNA: /* it's the same */
- dev->base_addr =
- (int)(ndev->board->slot_addr +
- DAYNA_8390_BASE);
- dev->mem_start =
- (int)(ndev->board->slot_addr +
- DAYNA_8390_MEM);
- dev->mem_end =
- dev->mem_start +
- mac8390_memsize(dev->mem_start);
- break;
- case MAC8390_INTERLAN:
- dev->base_addr =
- (int)(ndev->board->slot_addr +
- INTERLAN_8390_BASE);
- dev->mem_start =
- (int)(ndev->board->slot_addr +
- INTERLAN_8390_MEM);
- dev->mem_end =
- dev->mem_start +
- mac8390_memsize(dev->mem_start);
- break;
- case MAC8390_CABLETRON:
- dev->base_addr =
- (int)(ndev->board->slot_addr +
- CABLETRON_8390_BASE);
- dev->mem_start =
- (int)(ndev->board->slot_addr +
- CABLETRON_8390_MEM);
- /* The base address is unreadable if 0x00
- * has been written to the command register
- * Reset the chip by writing E8390_NODMA +
- * E8390_PAGE0 + E8390_STOP just to be
- * sure
- */
- i = (void *)dev->base_addr;
- *i = 0x21;
- dev->mem_end =
- dev->mem_start +
- mac8390_memsize(dev->mem_start);
- break;
-
- default:
- printk(KERN_ERR "Card type %s is"
- " unsupported, sorry\n",
- ndev->board->name);
- continue;
- }
- }
/* Do the nasty 8390 stuff */
if (!mac8390_initdev(dev, ndev, cardtype))
@@ -458,7 +467,7 @@ int init_module(void)
dev_mac890[i] = dev;
}
if (!i) {
- printk(KERN_NOTICE "mac8390.c: No useable cards found, driver NOT installed.\n");
+ pr_notice("No useable cards found, driver NOT installed.\n");
return -ENODEV;
}
return 0;
@@ -493,22 +502,23 @@ static const struct net_device_ops mac8390_netdev_ops = {
#endif
};
-static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev,
- enum mac8390_type type)
+static int __init mac8390_initdev(struct net_device *dev,
+ struct nubus_dev *ndev,
+ enum mac8390_type type)
{
- static u32 fwrd4_offsets[16]={
+ static u32 fwrd4_offsets[16] = {
0, 4, 8, 12,
16, 20, 24, 28,
32, 36, 40, 44,
48, 52, 56, 60
};
- static u32 back4_offsets[16]={
+ static u32 back4_offsets[16] = {
60, 56, 52, 48,
44, 40, 36, 32,
28, 24, 20, 16,
12, 8, 4, 0
};
- static u32 fwrd2_offsets[16]={
+ static u32 fwrd2_offsets[16] = {
0, 2, 4, 6,
8, 10, 12, 14,
16, 18, 20, 22,
@@ -526,47 +536,47 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
/* Cabletron's TX/RX buffers are backwards */
if (type == MAC8390_CABLETRON) {
- ei_status.tx_start_page = CABLETRON_TX_START_PG;
- ei_status.rx_start_page = CABLETRON_RX_START_PG;
- ei_status.stop_page = CABLETRON_RX_STOP_PG;
- ei_status.rmem_start = dev->mem_start;
- ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256;
+ ei_status.tx_start_page = CABLETRON_TX_START_PG;
+ ei_status.rx_start_page = CABLETRON_RX_START_PG;
+ ei_status.stop_page = CABLETRON_RX_STOP_PG;
+ ei_status.rmem_start = dev->mem_start;
+ ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256;
} else {
- ei_status.tx_start_page = WD_START_PG;
- ei_status.rx_start_page = WD_START_PG + TX_PAGES;
- ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
- ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
- ei_status.rmem_end = dev->mem_end;
+ ei_status.tx_start_page = WD_START_PG;
+ ei_status.rx_start_page = WD_START_PG + TX_PAGES;
+ ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
+ ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
+ ei_status.rmem_end = dev->mem_end;
}
/* Fill in model-specific information and functions */
- switch(type) {
+ switch (type) {
case MAC8390_FARALLON:
case MAC8390_APPLE:
- switch(mac8390_testio(dev->mem_start)) {
- case ACCESS_UNKNOWN:
- printk("Don't know how to access card memory!\n");
- return -ENODEV;
- break;
+ switch (mac8390_testio(dev->mem_start)) {
+ case ACCESS_UNKNOWN:
+ pr_info("Don't know how to access card memory!\n");
+ return -ENODEV;
+ break;
- case ACCESS_16:
- /* 16 bit card, register map is reversed */
- ei_status.reset_8390 = &mac8390_no_reset;
- ei_status.block_input = &slow_sane_block_input;
- ei_status.block_output = &slow_sane_block_output;
- ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
- ei_status.reg_offset = back4_offsets;
- break;
+ case ACCESS_16:
+ /* 16 bit card, register map is reversed */
+ ei_status.reset_8390 = &mac8390_no_reset;
+ ei_status.block_input = &slow_sane_block_input;
+ ei_status.block_output = &slow_sane_block_output;
+ ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
+ ei_status.reg_offset = back4_offsets;
+ break;
- case ACCESS_32:
- /* 32 bit card, register map is reversed */
- ei_status.reset_8390 = &mac8390_no_reset;
- ei_status.block_input = &sane_block_input;
- ei_status.block_output = &sane_block_output;
- ei_status.get_8390_hdr = &sane_get_8390_hdr;
- ei_status.reg_offset = back4_offsets;
- access_bitmode = 1;
- break;
+ case ACCESS_32:
+ /* 32 bit card, register map is reversed */
+ ei_status.reset_8390 = &mac8390_no_reset;
+ ei_status.block_input = &sane_block_input;
+ ei_status.block_output = &sane_block_output;
+ ei_status.get_8390_hdr = &sane_get_8390_hdr;
+ ei_status.reg_offset = back4_offsets;
+ access_bitmode = 1;
+ break;
}
break;
@@ -608,24 +618,25 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
ei_status.block_input = &slow_sane_block_input;
ei_status.block_output = &slow_sane_block_output;
ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
- ei_status.reg_offset = fwrd4_offsets;
- break;
+ ei_status.reg_offset = fwrd4_offsets;
+ break;
default:
- printk(KERN_ERR "Card type %s is unsupported, sorry\n", ndev->board->name);
+ pr_err("Card type %s is unsupported, sorry\n",
+ ndev->board->name);
return -ENODEV;
}
__NS8390_init(dev, 0);
/* Good, done, now spit out some messages */
- printk(KERN_INFO "%s: %s in slot %X (type %s)\n",
- dev->name, ndev->board->name, ndev->board->slot, cardname[type]);
- printk(KERN_INFO
- "MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n",
- dev->dev_addr, dev->irq,
- (unsigned int)(dev->mem_end - dev->mem_start) >> 10,
- dev->mem_start, access_bitmode ? 32 : 16);
+ pr_info("%s: %s in slot %X (type %s)\n",
+ dev->name, ndev->board->name, ndev->board->slot,
+ cardname[type]);
+ pr_info("MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n",
+ dev->dev_addr, dev->irq,
+ (unsigned int)(dev->mem_end - dev->mem_start) >> 10,
+ dev->mem_start, access_bitmode ? 32 : 16);
return 0;
}
@@ -633,7 +644,7 @@ static int mac8390_open(struct net_device *dev)
{
__ei_open(dev);
if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) {
- printk ("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ pr_info("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
return -EAGAIN;
}
return 0;
@@ -650,72 +661,71 @@ static void mac8390_no_reset(struct net_device *dev)
{
ei_status.txing = 0;
if (ei_debug > 1)
- printk("reset not supported\n");
+ pr_info("reset not supported\n");
return;
}
static void interlan_reset(struct net_device *dev)
{
- unsigned char *target=nubus_slot_addr(IRQ2SLOT(dev->irq));
+ unsigned char *target = nubus_slot_addr(IRQ2SLOT(dev->irq));
if (ei_debug > 1)
- printk("Need to reset the NS8390 t=%lu...", jiffies);
+ pr_info("Need to reset the NS8390 t=%lu...", jiffies);
ei_status.txing = 0;
target[0xC0000] = 0;
if (ei_debug > 1)
- printk("reset complete\n");
+ pr_cont("reset complete\n");
return;
}
/* dayna_memcpy_fromio/dayna_memcpy_toio */
/* directly from daynaport.c by Alan Cox */
-static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from, int count)
+static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from,
+ int count)
{
volatile unsigned char *ptr;
- unsigned char *target=to;
- from<<=1; /* word, skip overhead */
- ptr=(unsigned char *)(dev->mem_start+from);
+ unsigned char *target = to;
+ from <<= 1; /* word, skip overhead */
+ ptr = (unsigned char *)(dev->mem_start+from);
/* Leading byte? */
- if (from&2) {
+ if (from & 2) {
*target++ = ptr[-1];
ptr += 2;
count--;
}
- while(count>=2)
- {
+ while (count >= 2) {
*(unsigned short *)target = *(unsigned short volatile *)ptr;
ptr += 4; /* skip cruft */
target += 2;
- count-=2;
+ count -= 2;
}
/* Trailing byte? */
- if(count)
+ if (count)
*target = *ptr;
}
-static void dayna_memcpy_tocard(struct net_device *dev, int to, const void *from, int count)
+static void dayna_memcpy_tocard(struct net_device *dev, int to,
+ const void *from, int count)
{
volatile unsigned short *ptr;
- const unsigned char *src=from;
- to<<=1; /* word, skip overhead */
- ptr=(unsigned short *)(dev->mem_start+to);
+ const unsigned char *src = from;
+ to <<= 1; /* word, skip overhead */
+ ptr = (unsigned short *)(dev->mem_start+to);
/* Leading byte? */
- if (to&2) { /* avoid a byte write (stomps on other data) */
+ if (to & 2) { /* avoid a byte write (stomps on other data) */
ptr[-1] = (ptr[-1]&0xFF00)|*src++;
ptr++;
count--;
}
- while(count>=2)
- {
- *ptr++=*(unsigned short *)src; /* Copy and */
+ while (count >= 2) {
+ *ptr++ = *(unsigned short *)src; /* Copy and */
ptr++; /* skip cruft */
src += 2;
- count-=2;
+ count -= 2;
}
/* Trailing byte? */
- if(count)
- {
+ if (count) {
/* card doesn't like byte writes */
- *ptr=(*ptr&0x00FF)|(*src << 8);
+ *ptr = (*ptr & 0x00FF) | (*src << 8);
}
}
@@ -738,11 +748,14 @@ static void sane_block_input(struct net_device *dev, int count,
if (xfer_start + count > ei_status.rmem_end) {
/* We must wrap the input move. */
int semi_count = ei_status.rmem_end - xfer_start;
- memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, semi_count);
+ memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
+ semi_count);
count -= semi_count;
- memcpy_toio(skb->data + semi_count, (char *)ei_status.rmem_start, count);
+ memcpy_toio(skb->data + semi_count,
+ (char *)ei_status.rmem_start, count);
} else {
- memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, count);
+ memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
+ count);
}
}
@@ -755,16 +768,18 @@ static void sane_block_output(struct net_device *dev, int count,
}
/* dayna block input/output */
-static void dayna_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+static void dayna_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page)
{
unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
- dayna_memcpy_fromcard(dev, (void *)hdr, hdr_start, 4);
+ dayna_memcpy_fromcard(dev, hdr, hdr_start, 4);
/* Fix endianness */
- hdr->count=(hdr->count&0xFF)<<8|(hdr->count>>8);
+ hdr->count = (hdr->count & 0xFF) << 8 | (hdr->count >> 8);
}
-static void dayna_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+static void dayna_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
{
unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
unsigned long xfer_start = xfer_base+dev->mem_start;
@@ -772,8 +787,7 @@ static void dayna_block_input(struct net_device *dev, int count, struct sk_buff
/* Note the offset math is done in card memory space which is word
per long onto our space. */
- if (xfer_start + count > ei_status.rmem_end)
- {
+ if (xfer_start + count > ei_status.rmem_end) {
/* We must wrap the input move. */
int semi_count = ei_status.rmem_end - xfer_start;
dayna_memcpy_fromcard(dev, skb->data, xfer_base, semi_count);
@@ -781,15 +795,14 @@ static void dayna_block_input(struct net_device *dev, int count, struct sk_buff
dayna_memcpy_fromcard(dev, skb->data + semi_count,
ei_status.rmem_start - dev->mem_start,
count);
- }
- else
- {
+ } else {
dayna_memcpy_fromcard(dev, skb->data, xfer_base, count);
}
}
-static void dayna_block_output(struct net_device *dev, int count, const unsigned char *buf,
- int start_page)
+static void dayna_block_output(struct net_device *dev, int count,
+ const unsigned char *buf,
+ int start_page)
{
long shmem = (start_page - WD_START_PG)<<8;
@@ -797,40 +810,39 @@ static void dayna_block_output(struct net_device *dev, int count, const unsigned
}
/* Cabletron block I/O */
-static void slow_sane_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page)
+static void slow_sane_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
{
unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
- word_memcpy_fromcard((void *)hdr, (char *)dev->mem_start+hdr_start, 4);
+ word_memcpy_fromcard(hdr, (char *)dev->mem_start + hdr_start, 4);
/* Register endianism - fix here rather than 8390.c */
hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8);
}
-static void slow_sane_block_input(struct net_device *dev, int count, struct sk_buff *skb,
- int ring_offset)
+static void slow_sane_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
{
unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
unsigned long xfer_start = xfer_base+dev->mem_start;
- if (xfer_start + count > ei_status.rmem_end)
- {
+ if (xfer_start + count > ei_status.rmem_end) {
/* We must wrap the input move. */
int semi_count = ei_status.rmem_end - xfer_start;
- word_memcpy_fromcard(skb->data, (char *)dev->mem_start +
- xfer_base, semi_count);
+ word_memcpy_fromcard(skb->data,
+ (char *)dev->mem_start + xfer_base,
+ semi_count);
count -= semi_count;
word_memcpy_fromcard(skb->data + semi_count,
(char *)ei_status.rmem_start, count);
- }
- else
- {
- word_memcpy_fromcard(skb->data, (char *)dev->mem_start +
- xfer_base, count);
+ } else {
+ word_memcpy_fromcard(skb->data,
+ (char *)dev->mem_start + xfer_base, count);
}
}
-static void slow_sane_block_output(struct net_device *dev, int count, const unsigned char *buf,
- int start_page)
+static void slow_sane_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
{
long shmem = (start_page - WD_START_PG)<<8;
@@ -843,10 +855,10 @@ static void word_memcpy_tocard(void *tp, const void *fp, int count)
const unsigned short *from = fp;
count++;
- count/=2;
+ count /= 2;
- while(count--)
- *to++=*from++;
+ while (count--)
+ *to++ = *from++;
}
static void word_memcpy_fromcard(void *tp, const void *fp, int count)
@@ -855,10 +867,10 @@ static void word_memcpy_fromcard(void *tp, const void *fp, int count)
const volatile unsigned short *from = fp;
count++;
- count/=2;
+ count /= 2;
- while(count--)
- *to++=*from++;
+ while (count--)
+ *to++ = *from++;
}
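
The two word-copy helpers above move data to and from card memory one 16-bit word at a time; the "count++; count /= 2" idiom rounds an odd byte count up to a whole word before the loop. A tiny stand-alone sketch of that rounding, purely illustrative and not part of the driver:

#include <assert.h>

/* Card memory is only word-accessible, so byte counts are rounded up
 * to whole 16-bit words: 5 bytes -> 3 words -> 6 bytes actually moved. */
static int words_for_bytes(int count)
{
        count++;
        return count / 2;
}

int main(void)
{
        assert(words_for_bytes(4) == 2);
        assert(words_for_bytes(5) == 3);
        return 0;
}
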
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 1d0d4d9ab623..7a5f89728a81 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -189,18 +189,11 @@ static void macb_handle_link_change(struct net_device *dev)
static int macb_mii_probe(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev = NULL;
+ struct phy_device *phydev;
struct eth_platform_data *pdata;
- int phy_addr;
-
- /* find the first phy */
- for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
- if (bp->mii_bus->phy_map[phy_addr]) {
- phydev = bp->mii_bus->phy_map[phy_addr];
- break;
- }
- }
+ int ret;
+ phydev = phy_find_first(bp->mii_bus);
if (!phydev) {
printk (KERN_ERR "%s: no PHY found\n", dev->name);
return -1;
@@ -210,17 +203,13 @@ static int macb_mii_probe(struct net_device *dev)
/* TODO : add pin_irq */
/* attach the mac to the phy */
- if (pdata && pdata->is_rmii) {
- phydev = phy_connect(dev, dev_name(&phydev->dev),
- &macb_handle_link_change, 0, PHY_INTERFACE_MODE_RMII);
- } else {
- phydev = phy_connect(dev, dev_name(&phydev->dev),
- &macb_handle_link_change, 0, PHY_INTERFACE_MODE_MII);
- }
-
- if (IS_ERR(phydev)) {
+ ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
+ pdata && pdata->is_rmii ?
+ PHY_INTERFACE_MODE_RMII :
+ PHY_INTERFACE_MODE_MII);
+ if (ret) {
printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
- return PTR_ERR(phydev);
+ return ret;
}
/* mask with MAC supported features */
@@ -901,7 +890,7 @@ static void macb_sethashtable(struct net_device *dev)
mc_filter[0] = mc_filter[1] = 0;
curr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, curr = curr->next) {
+ for (i = 0; i < netdev_mc_count(dev); i++, curr = curr->next) {
if (!curr) break; /* unexpected end of list */
bitnr = hash_get_index(curr->dmi_addr);
@@ -934,7 +923,7 @@ static void macb_set_rx_mode(struct net_device *dev)
macb_writel(bp, HRB, -1);
macb_writel(bp, HRT, -1);
cfg |= MACB_BIT(NCFGR_MTI);
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
/* Enable specific multicasts */
macb_sethashtable(dev);
cfg |= MACB_BIT(NCFGR_MTI);
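
The macb probe change above replaces the hand-rolled scan of mii_bus->phy_map[] plus phy_connect() with the newer phy_find_first()/phy_connect_direct() pair. A minimal sketch of the same pattern for a hypothetical driver (foo_adjust_link, foo_mii_probe and the use_rmii flag are illustrative, not from macb):

#include <linux/phy.h>
#include <linux/netdevice.h>

/* Illustrative link-change callback; a real driver reprograms its MAC here. */
static void foo_adjust_link(struct net_device *dev)
{
}

static int foo_mii_probe(struct net_device *dev, struct mii_bus *bus,
                         bool use_rmii)
{
        struct phy_device *phydev;
        int ret;

        /* Let phylib pick the first PHY on the bus instead of walking
         * phy_map[] by hand. */
        phydev = phy_find_first(bus);
        if (!phydev)
                return -ENODEV;

        /* Attach directly to the phy_device we already hold; no error
         * pointer to unwrap as with phy_connect(). */
        ret = phy_connect_direct(dev, phydev, foo_adjust_link, 0,
                                 use_rmii ? PHY_INTERFACE_MODE_RMII
                                          : PHY_INTERFACE_MODE_MII);
        if (ret)
                return ret;

        /* Mask with the MAC's supported features, as macb does. */
        phydev->supported &= PHY_BASIC_FEATURES;
        phydev->advertising = phydev->supported;
        return 0;
}
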
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index d9fbad386389..fdb0bbdd6782 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -606,7 +606,7 @@ static void mace_set_multicast(struct net_device *dev)
} else {
for (i = 0; i < 8; i++)
multicast_filter[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
crc = ether_crc_le(6, dmi->dmi_addr);
j = crc >> 26; /* bit number in multicast_filter */
multicast_filter[j >> 3] |= 1 << (j & 7);
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 44f3c2896f20..740accbf0806 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -518,7 +518,7 @@ static void mace_set_multicast(struct net_device *dev)
} else {
for (i = 0; i < 8; i++)
multicast_filter[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
crc = ether_crc_le(6, dmi->dmi_addr);
j = crc >> 26; /* bit number in multicast_filter */
multicast_filter[j >> 3] |= 1 << (j & 7);
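
mace.c and macmace.c above, and several drivers further down (natsemi, netxen, ni52, ni65), get the same mechanical conversion: direct reads of dev->mc_count become the netdev_mc_count()/netdev_mc_empty() accessors while the dev->mc_list walk itself is unchanged. A hedged sketch of a typical multicast-hash rebuild after the conversion (foo_set_multicast and the final hardware-programming step are illustrative):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>

static void foo_set_multicast(struct net_device *dev)
{
        struct dev_mc_list *dmi;
        u8 filter[8] = { 0 };
        int i;

        if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) || netdev_mc_empty(dev))
                return;         /* nothing to hash in these modes */

        for (i = 0, dmi = dev->mc_list;
             dmi && i < netdev_mc_count(dev); i++, dmi = dmi->next) {
                /* Top six bits of the little-endian CRC pick the filter bit,
                 * as in the mace/macmace loops above. */
                u32 crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
                unsigned int bit = crc >> 26;

                filter[bit >> 3] |= 1 << (bit & 7);
        }

        /* program "filter" into the hardware here */
}
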
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 21a9c9ab4b34..40faa368b07a 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -39,31 +39,6 @@ struct macvlan_port {
struct list_head vlans;
};
-/**
- * struct macvlan_rx_stats - MACVLAN percpu rx stats
- * @rx_packets: number of received packets
- * @rx_bytes: number of received bytes
- * @multicast: number of received multicast packets
- * @rx_errors: number of errors
- */
-struct macvlan_rx_stats {
- unsigned long rx_packets;
- unsigned long rx_bytes;
- unsigned long multicast;
- unsigned long rx_errors;
-};
-
-struct macvlan_dev {
- struct net_device *dev;
- struct list_head list;
- struct hlist_node hlist;
- struct macvlan_port *port;
- struct net_device *lowerdev;
- struct macvlan_rx_stats *rx_stats;
- enum macvlan_mode mode;
-};
-
-
static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
const unsigned char *addr)
{
@@ -118,31 +93,17 @@ static int macvlan_addr_busy(const struct macvlan_port *port,
return 0;
}
-static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
- unsigned int len, bool success,
- bool multicast)
-{
- struct macvlan_rx_stats *rx_stats;
-
- rx_stats = per_cpu_ptr(vlan->rx_stats, smp_processor_id());
- if (likely(success)) {
- rx_stats->rx_packets++;;
- rx_stats->rx_bytes += len;
- if (multicast)
- rx_stats->multicast++;
- } else {
- rx_stats->rx_errors++;
- }
-}
-static int macvlan_broadcast_one(struct sk_buff *skb, struct net_device *dev,
+static int macvlan_broadcast_one(struct sk_buff *skb,
+ const struct macvlan_dev *vlan,
const struct ethhdr *eth, bool local)
{
+ struct net_device *dev = vlan->dev;
if (!skb)
return NET_RX_DROP;
if (local)
- return dev_forward_skb(dev, skb);
+ return vlan->forward(dev, skb);
skb->dev = dev;
if (!compare_ether_addr_64bits(eth->h_dest,
@@ -151,7 +112,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb, struct net_device *dev,
else
skb->pkt_type = PACKET_MULTICAST;
- return netif_rx(skb);
+ return vlan->receive(skb);
}
static void macvlan_broadcast(struct sk_buff *skb,
@@ -175,7 +136,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
continue;
nskb = skb_clone(skb, GFP_ATOMIC);
- err = macvlan_broadcast_one(nskb, vlan->dev, eth,
+ err = macvlan_broadcast_one(nskb, vlan, eth,
mode == MACVLAN_MODE_BRIDGE);
macvlan_count_rx(vlan, skb->len + ETH_HLEN,
err == NET_RX_SUCCESS, 1);
@@ -238,7 +199,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
skb->dev = dev;
skb->pkt_type = PACKET_HOST;
- netif_rx(skb);
+ vlan->receive(skb);
return NULL;
}
@@ -260,7 +221,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
dest = macvlan_hash_lookup(port, eth->h_dest);
if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
unsigned int length = skb->len + ETH_HLEN;
- int ret = dev_forward_skb(dest->dev, skb);
+ int ret = dest->forward(dest->dev, skb);
macvlan_count_rx(dest, length,
ret == NET_RX_SUCCESS, 0);
@@ -269,12 +230,12 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
}
xmit_world:
- skb->dev = vlan->lowerdev;
+ skb_set_dev(skb, vlan->lowerdev);
return dev_queue_xmit(skb);
}
-static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
+netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
int i = skb_get_queue_mapping(skb);
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
@@ -290,6 +251,7 @@ static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
return ret;
}
+EXPORT_SYMBOL_GPL(macvlan_start_xmit);
static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
@@ -418,7 +380,7 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
#define MACVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
- NETIF_F_TSO_ECN | NETIF_F_TSO6)
+ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO)
#define MACVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
@@ -623,8 +585,11 @@ static int macvlan_get_tx_queues(struct net *net,
return 0;
}
-static int macvlan_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ int (*receive)(struct sk_buff *skb),
+ int (*forward)(struct net_device *dev,
+ struct sk_buff *skb))
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct macvlan_port *port;
@@ -664,6 +629,8 @@ static int macvlan_newlink(struct net *src_net, struct net_device *dev,
vlan->lowerdev = lowerdev;
vlan->dev = dev;
vlan->port = port;
+ vlan->receive = receive;
+ vlan->forward = forward;
vlan->mode = MACVLAN_MODE_VEPA;
if (data && data[IFLA_MACVLAN_MODE])
@@ -677,8 +644,17 @@ static int macvlan_newlink(struct net *src_net, struct net_device *dev,
netif_stacked_transfer_operstate(lowerdev, dev);
return 0;
}
+EXPORT_SYMBOL_GPL(macvlan_common_newlink);
-static void macvlan_dellink(struct net_device *dev, struct list_head *head)
+static int macvlan_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ return macvlan_common_newlink(src_net, dev, tb, data,
+ netif_rx,
+ dev_forward_skb);
+}
+
+void macvlan_dellink(struct net_device *dev, struct list_head *head)
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct macvlan_port *port = vlan->port;
@@ -689,6 +665,7 @@ static void macvlan_dellink(struct net_device *dev, struct list_head *head)
if (list_empty(&port->vlans))
macvlan_port_destroy(port->dev);
}
+EXPORT_SYMBOL_GPL(macvlan_dellink);
static int macvlan_changelink(struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
@@ -720,19 +697,27 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
[IFLA_MACVLAN_MODE] = { .type = NLA_U32 },
};
-static struct rtnl_link_ops macvlan_link_ops __read_mostly = {
+int macvlan_link_register(struct rtnl_link_ops *ops)
+{
+ /* common fields */
+ ops->priv_size = sizeof(struct macvlan_dev);
+ ops->get_tx_queues = macvlan_get_tx_queues;
+ ops->setup = macvlan_setup;
+ ops->validate = macvlan_validate;
+ ops->maxtype = IFLA_MACVLAN_MAX;
+ ops->policy = macvlan_policy;
+ ops->changelink = macvlan_changelink;
+ ops->get_size = macvlan_get_size;
+ ops->fill_info = macvlan_fill_info;
+
+ return rtnl_link_register(ops);
+}
+EXPORT_SYMBOL_GPL(macvlan_link_register);
+
+static struct rtnl_link_ops macvlan_link_ops = {
.kind = "macvlan",
- .priv_size = sizeof(struct macvlan_dev),
- .get_tx_queues = macvlan_get_tx_queues,
- .setup = macvlan_setup,
- .validate = macvlan_validate,
.newlink = macvlan_newlink,
.dellink = macvlan_dellink,
- .maxtype = IFLA_MACVLAN_MAX,
- .policy = macvlan_policy,
- .changelink = macvlan_changelink,
- .get_size = macvlan_get_size,
- .fill_info = macvlan_fill_info,
};
static int macvlan_device_event(struct notifier_block *unused,
@@ -761,7 +746,7 @@ static int macvlan_device_event(struct notifier_block *unused,
break;
case NETDEV_UNREGISTER:
list_for_each_entry_safe(vlan, next, &port->vlans, list)
- macvlan_dellink(vlan->dev, NULL);
+ vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL);
break;
}
return NOTIFY_DONE;
@@ -778,7 +763,7 @@ static int __init macvlan_init_module(void)
register_netdevice_notifier(&macvlan_notifier_block);
macvlan_handle_frame_hook = macvlan_handle_frame;
- err = rtnl_link_register(&macvlan_link_ops);
+ err = macvlan_link_register(&macvlan_link_ops);
if (err < 0)
goto err1;
return 0;
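
Taken together, the changes above turn macvlan into a small library for stacked variants: a new driver supplies only its own receive/forward hooks and a newlink wrapper, and macvlan_link_register() fills in the common rtnl_link_ops fields before registering. A condensed sketch of that pattern, assuming the declarations this series exports through <linux/if_macvlan.h> (the "foo" names are illustrative; the macvtap driver added below is the real in-tree user):

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_macvlan.h>
#include <net/rtnetlink.h>

/* Hooks with the signatures macvlan_common_newlink() expects; these
 * particular bodies just reproduce plain macvlan behaviour. */
static int foo_receive(struct sk_buff *skb)
{
        return netif_rx(skb);
}

static int foo_forward(struct net_device *dev, struct sk_buff *skb)
{
        return dev_forward_skb(dev, skb);
}

static int foo_newlink(struct net *src_net, struct net_device *dev,
                       struct nlattr *tb[], struct nlattr *data[])
{
        return macvlan_common_newlink(src_net, dev, tb, data,
                                      foo_receive, foo_forward);
}

static struct rtnl_link_ops foo_link_ops = {
        .kind           = "foo",
        .newlink        = foo_newlink,
        .dellink        = macvlan_dellink,
};

static int __init foo_init(void)
{
        /* priv_size, setup, policy, etc. are filled in for us. */
        return macvlan_link_register(&foo_link_ops);
}
module_init(foo_init);

MODULE_LICENSE("GPL");
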
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
new file mode 100644
index 000000000000..fe7656bf68c6
--- /dev/null
+++ b/drivers/net/macvtap.c
@@ -0,0 +1,590 @@
+#include <linux/etherdevice.h>
+#include <linux/if_macvlan.h>
+#include <linux/interrupt.h>
+#include <linux/nsproxy.h>
+#include <linux/compat.h>
+#include <linux/if_tun.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/cache.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+
+#include <net/net_namespace.h>
+#include <net/rtnetlink.h>
+#include <net/sock.h>
+
+/*
+ * A macvtap queue is the central object of this driver; it connects
+ * an open character device to a macvlan interface. There can be
+ * multiple queues on one interface, which map back to queues
+ * implemented in hardware on the underlying device.
+ *
+ * macvtap_proto is used to allocate queues through the sock allocation
+ * mechanism.
+ *
+ * TODO: multiqueue support is currently not implemented, even though
+ * macvtap is basically prepared for that. We will need to add this
+ * here as well as in virtio-net and qemu to get line rate on 10gbit
+ * adapters from a guest.
+ */
+struct macvtap_queue {
+ struct sock sk;
+ struct socket sock;
+ struct macvlan_dev *vlan;
+ struct file *file;
+};
+
+static struct proto macvtap_proto = {
+ .name = "macvtap",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof (struct macvtap_queue),
+};
+
+/*
+ * Minor number matches netdev->ifindex, so we need a potentially
+ * large value. This also makes it possible to split the
+ * tap functionality out again in the future by offering it
+ * from other drivers besides macvtap. As long as every device
+ * only has one tap, the interface numbers ensure that the
+ * device nodes are unique.
+ */
+static unsigned int macvtap_major;
+#define MACVTAP_NUM_DEVS 65536
+static struct class *macvtap_class;
+static struct cdev macvtap_cdev;
+
+/*
+ * RCU usage:
+ * The macvtap_queue is referenced both from the chardev struct file
+ * and from the struct macvlan_dev using rcu_read_lock.
+ *
+ * We never actually update the contents of a macvtap_queue atomically
+ * with RCU but it is used for race-free destruction of a queue when
+ * either the file or the macvlan_dev goes away. Pointers back to
+ * the dev and the file are implicitly valid as long as the queue
+ * exists.
+ *
+ * The callbacks from macvlan are always done with rcu_read_lock held
+ * already. For calls from file_operations, we use the rcu_read_lock_bh
+ * to get a reference count on the socket and the device.
+ *
+ * When destroying a queue, we remove the pointers from the file and
+ * from the dev and then synchronize_rcu to make sure no thread is
+ * still using the queue. There may still be references to the struct
+ * sock inside of the queue from outbound SKBs, but these never
+ * reference back to the file or the dev. The data structure is freed
+ * through __sk_free when both our references and any pending SKBs
+ * are gone.
+ *
+ * macvtap_lock is only used to prevent multiple concurrent open()
+ * calls to assign a new vlan->tap pointer. It could be moved into
+ * the macvlan_dev itself but is extremely rarely used.
+ */
+static DEFINE_SPINLOCK(macvtap_lock);
+
+/*
+ * Choose the next free queue, for now there is only one
+ */
+static int macvtap_set_queue(struct net_device *dev, struct file *file,
+ struct macvtap_queue *q)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ int err = -EBUSY;
+
+ spin_lock(&macvtap_lock);
+ if (rcu_dereference(vlan->tap))
+ goto out;
+
+ err = 0;
+ q->vlan = vlan;
+ rcu_assign_pointer(vlan->tap, q);
+
+ q->file = file;
+ rcu_assign_pointer(file->private_data, q);
+
+out:
+ spin_unlock(&macvtap_lock);
+ return err;
+}
+
+/*
+ * We must destroy each queue exactly once, when either
+ * the netdev or the file goes away.
+ *
+ * Using the spinlock makes sure that we don't get
+ * to the queue again after destroying it.
+ *
+ * synchronize_rcu serializes with the packet flow
+ * that uses rcu_read_lock.
+ */
+static void macvtap_del_queue(struct macvtap_queue **qp)
+{
+ struct macvtap_queue *q;
+
+ spin_lock(&macvtap_lock);
+ q = rcu_dereference(*qp);
+ if (!q) {
+ spin_unlock(&macvtap_lock);
+ return;
+ }
+
+ rcu_assign_pointer(q->vlan->tap, NULL);
+ rcu_assign_pointer(q->file->private_data, NULL);
+ spin_unlock(&macvtap_lock);
+
+ synchronize_rcu();
+ sock_put(&q->sk);
+}
+
+/*
+ * Since we only support one queue, just dereference the pointer.
+ */
+static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+
+ return rcu_dereference(vlan->tap);
+}
+
+static void macvtap_del_queues(struct net_device *dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ macvtap_del_queue(&vlan->tap);
+}
+
+static inline struct macvtap_queue *macvtap_file_get_queue(struct file *file)
+{
+ struct macvtap_queue *q;
+ rcu_read_lock_bh();
+ q = rcu_dereference(file->private_data);
+ if (q) {
+ sock_hold(&q->sk);
+ dev_hold(q->vlan->dev);
+ }
+ rcu_read_unlock_bh();
+ return q;
+}
+
+static inline void macvtap_file_put_queue(struct macvtap_queue *q)
+{
+ sock_put(&q->sk);
+ dev_put(q->vlan->dev);
+}
+
+/*
+ * Forward happens for data that gets sent from one macvlan
+ * endpoint to another one in bridge mode. We just take
+ * the skb and put it into the receive queue.
+ */
+static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
+{
+ struct macvtap_queue *q = macvtap_get_queue(dev, skb);
+ if (!q)
+ return -ENOLINK;
+
+ skb_queue_tail(&q->sk.sk_receive_queue, skb);
+ wake_up(q->sk.sk_sleep);
+ return 0;
+}
+
+/*
+ * Receive is for data from the external interface (lowerdev);
+ * in the case of macvtap, we can treat it the same way as
+ * forward, which macvlan cannot.
+ */
+static int macvtap_receive(struct sk_buff *skb)
+{
+ skb_push(skb, ETH_HLEN);
+ return macvtap_forward(skb->dev, skb);
+}
+
+static int macvtap_newlink(struct net *src_net,
+ struct net_device *dev,
+ struct nlattr *tb[],
+ struct nlattr *data[])
+{
+ struct device *classdev;
+ dev_t devt;
+ int err;
+
+ err = macvlan_common_newlink(src_net, dev, tb, data,
+ macvtap_receive, macvtap_forward);
+ if (err)
+ goto out;
+
+ devt = MKDEV(MAJOR(macvtap_major), dev->ifindex);
+
+ classdev = device_create(macvtap_class, &dev->dev, devt,
+ dev, "tap%d", dev->ifindex);
+ if (IS_ERR(classdev)) {
+ err = PTR_ERR(classdev);
+ macvtap_del_queues(dev);
+ }
+
+out:
+ return err;
+}
+
+static void macvtap_dellink(struct net_device *dev,
+ struct list_head *head)
+{
+ device_destroy(macvtap_class,
+ MKDEV(MAJOR(macvtap_major), dev->ifindex));
+
+ macvtap_del_queues(dev);
+ macvlan_dellink(dev, head);
+}
+
+static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
+ .kind = "macvtap",
+ .newlink = macvtap_newlink,
+ .dellink = macvtap_dellink,
+};
+
+
+static void macvtap_sock_write_space(struct sock *sk)
+{
+ if (!sock_writeable(sk) ||
+ !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
+ return;
+
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_sync(sk->sk_sleep);
+}
+
+static int macvtap_open(struct inode *inode, struct file *file)
+{
+ struct net *net = current->nsproxy->net_ns;
+ struct net_device *dev = dev_get_by_index(net, iminor(inode));
+ struct macvtap_queue *q;
+ int err;
+
+ err = -ENODEV;
+ if (!dev)
+ goto out;
+
+ /* check if this is a macvtap device */
+ err = -EINVAL;
+ if (dev->rtnl_link_ops != &macvtap_link_ops)
+ goto out;
+
+ err = -ENOMEM;
+ q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
+ &macvtap_proto);
+ if (!q)
+ goto out;
+
+ init_waitqueue_head(&q->sock.wait);
+ q->sock.type = SOCK_RAW;
+ q->sock.state = SS_CONNECTED;
+ sock_init_data(&q->sock, &q->sk);
+ q->sk.sk_allocation = GFP_ATOMIC; /* for now */
+ q->sk.sk_write_space = macvtap_sock_write_space;
+
+ err = macvtap_set_queue(dev, file, q);
+ if (err)
+ sock_put(&q->sk);
+
+out:
+ if (dev)
+ dev_put(dev);
+
+ return err;
+}
+
+static int macvtap_release(struct inode *inode, struct file *file)
+{
+ macvtap_del_queue((struct macvtap_queue **)&file->private_data);
+ return 0;
+}
+
+static unsigned int macvtap_poll(struct file *file, poll_table * wait)
+{
+ struct macvtap_queue *q = macvtap_file_get_queue(file);
+ unsigned int mask = POLLERR;
+
+ if (!q)
+ goto out;
+
+ mask = 0;
+ poll_wait(file, &q->sock.wait, wait);
+
+ if (!skb_queue_empty(&q->sk.sk_receive_queue))
+ mask |= POLLIN | POLLRDNORM;
+
+ if (sock_writeable(&q->sk) ||
+ (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
+ sock_writeable(&q->sk)))
+ mask |= POLLOUT | POLLWRNORM;
+
+ macvtap_file_put_queue(q);
+out:
+ return mask;
+}
+
+/* Get packet from user space buffer */
+static ssize_t macvtap_get_user(struct macvtap_queue *q,
+ const struct iovec *iv, size_t count,
+ int noblock)
+{
+ struct sk_buff *skb;
+ size_t len = count;
+ int err;
+
+ if (unlikely(len < ETH_HLEN))
+ return -EINVAL;
+
+ skb = sock_alloc_send_skb(&q->sk, NET_IP_ALIGN + len, noblock, &err);
+
+ if (!skb) {
+ macvlan_count_rx(q->vlan, 0, false, false);
+ return err;
+ }
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb_put(skb, count);
+
+ if (skb_copy_datagram_from_iovec(skb, 0, iv, 0, len)) {
+ macvlan_count_rx(q->vlan, 0, false, false);
+ kfree_skb(skb);
+ return -EFAULT;
+ }
+
+ skb_set_network_header(skb, ETH_HLEN);
+
+ macvlan_start_xmit(skb, q->vlan->dev);
+
+ return count;
+}
+
+static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
+ unsigned long count, loff_t pos)
+{
+ struct file *file = iocb->ki_filp;
+ ssize_t result = -ENOLINK;
+ struct macvtap_queue *q = macvtap_file_get_queue(file);
+
+ if (!q)
+ goto out;
+
+ result = macvtap_get_user(q, iv, iov_length(iv, count),
+ file->f_flags & O_NONBLOCK);
+ macvtap_file_put_queue(q);
+out:
+ return result;
+}
+
+/* Put packet to the user space buffer */
+static ssize_t macvtap_put_user(struct macvtap_queue *q,
+ const struct sk_buff *skb,
+ const struct iovec *iv, int len)
+{
+ struct macvlan_dev *vlan = q->vlan;
+ int ret;
+
+ len = min_t(int, skb->len, len);
+
+ ret = skb_copy_datagram_const_iovec(skb, 0, iv, 0, len);
+
+ macvlan_count_rx(vlan, len, ret == 0, 0);
+
+ return ret ? ret : len;
+}
+
+static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
+ unsigned long count, loff_t pos)
+{
+ struct file *file = iocb->ki_filp;
+ struct macvtap_queue *q = macvtap_file_get_queue(file);
+
+ DECLARE_WAITQUEUE(wait, current);
+ struct sk_buff *skb;
+ ssize_t len, ret = 0;
+
+ if (!q)
+ return -ENOLINK;
+
+ len = iov_length(iv, count);
+ if (len < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ add_wait_queue(q->sk.sk_sleep, &wait);
+ while (len) {
+ current->state = TASK_INTERRUPTIBLE;
+
+ /* Read frames from the queue */
+ skb = skb_dequeue(&q->sk.sk_receive_queue);
+ if (!skb) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ /* Nothing to read, let's sleep */
+ schedule();
+ continue;
+ }
+ ret = macvtap_put_user(q, skb, iv, len);
+ kfree_skb(skb);
+ break;
+ }
+
+ current->state = TASK_RUNNING;
+ remove_wait_queue(q->sk.sk_sleep, &wait);
+
+out:
+ macvtap_file_put_queue(q);
+ return ret;
+}
+
+/*
+ * provide compatibility with generic tun/tap interface
+ */
+static long macvtap_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct macvtap_queue *q;
+ void __user *argp = (void __user *)arg;
+ struct ifreq __user *ifr = argp;
+ unsigned int __user *up = argp;
+ unsigned int u;
+ char devname[IFNAMSIZ];
+
+ switch (cmd) {
+ case TUNSETIFF:
+ /* ignore the name, just look at flags */
+ if (get_user(u, &ifr->ifr_flags))
+ return -EFAULT;
+ if (u != (IFF_TAP | IFF_NO_PI))
+ return -EINVAL;
+ return 0;
+
+ case TUNGETIFF:
+ q = macvtap_file_get_queue(file);
+ if (!q)
+ return -ENOLINK;
+ memcpy(devname, q->vlan->dev->name, sizeof(devname));
+ macvtap_file_put_queue(q);
+
+ if (copy_to_user(&ifr->ifr_name, devname, IFNAMSIZ) ||
+ put_user((TUN_TAP_DEV | TUN_NO_PI), &ifr->ifr_flags))
+ return -EFAULT;
+ return 0;
+
+ case TUNGETFEATURES:
+ if (put_user((IFF_TAP | IFF_NO_PI), up))
+ return -EFAULT;
+ return 0;
+
+ case TUNSETSNDBUF:
+ if (get_user(u, up))
+ return -EFAULT;
+
+ q = macvtap_file_get_queue(file);
+ if (!q)
+ return -ENOLINK;
+ q->sk.sk_sndbuf = u;
+ macvtap_file_put_queue(q);
+ return 0;
+
+ case TUNSETOFFLOAD:
+ /* let the user check for future flags */
+ if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
+ TUN_F_TSO_ECN | TUN_F_UFO))
+ return -EINVAL;
+
+ /* TODO: add support for these, so far we don't
+ support any offload */
+ if (arg & (TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
+ TUN_F_TSO_ECN | TUN_F_UFO))
+ return -EINVAL;
+
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+#ifdef CONFIG_COMPAT
+static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static const struct file_operations macvtap_fops = {
+ .owner = THIS_MODULE,
+ .open = macvtap_open,
+ .release = macvtap_release,
+ .aio_read = macvtap_aio_read,
+ .aio_write = macvtap_aio_write,
+ .poll = macvtap_poll,
+ .llseek = no_llseek,
+ .unlocked_ioctl = macvtap_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = macvtap_compat_ioctl,
+#endif
+};
+
+static int macvtap_init(void)
+{
+ int err;
+
+ err = alloc_chrdev_region(&macvtap_major, 0,
+ MACVTAP_NUM_DEVS, "macvtap");
+ if (err)
+ goto out1;
+
+ cdev_init(&macvtap_cdev, &macvtap_fops);
+ err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
+ if (err)
+ goto out2;
+
+ macvtap_class = class_create(THIS_MODULE, "macvtap");
+ if (IS_ERR(macvtap_class)) {
+ err = PTR_ERR(macvtap_class);
+ goto out3;
+ }
+
+ err = macvlan_link_register(&macvtap_link_ops);
+ if (err)
+ goto out4;
+
+ return 0;
+
+out4:
+ class_unregister(macvtap_class);
+out3:
+ cdev_del(&macvtap_cdev);
+out2:
+ unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
+out1:
+ return err;
+}
+module_init(macvtap_init);
+
+static void macvtap_exit(void)
+{
+ rtnl_link_unregister(&macvtap_link_ops);
+ class_unregister(macvtap_class);
+ cdev_del(&macvtap_cdev);
+ unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
+}
+module_exit(macvtap_exit);
+
+MODULE_ALIAS_RTNL_LINK("macvtap");
+MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
+MODULE_LICENSE("GPL");
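
From user space the new driver behaves like a per-interface tun/tap device: each macvtap link gets a character device (created as "tap%d" from the interface index, so typically /dev/tapN once udev picks it up), and the ioctl layer above only accepts the IFF_TAP | IFF_NO_PI mode. A minimal, hedged user-space sketch of attaching to such a device and reading one frame (the /dev/tap5 path is hypothetical):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int main(void)
{
        struct ifreq ifr;
        char frame[2048];
        ssize_t n;
        int fd = open("/dev/tap5", O_RDWR);    /* hypothetical node */

        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&ifr, 0, sizeof(ifr));
        ifr.ifr_flags = IFF_TAP | IFF_NO_PI;    /* the only mode macvtap accepts */
        if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
                perror("TUNSETIFF");
                return 1;
        }

        /* Each read()/write() moves one raw Ethernet frame. */
        n = read(fd, frame, sizeof(frame));
        if (n > 0)
                printf("got a %zd byte frame\n", n);

        close(fd);
        return 0;
}
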
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 2af81735386b..9f72cb45f4af 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -51,14 +51,11 @@
static const char *meth_str="SGI O2 Fast Ethernet";
-#define HAVE_TX_TIMEOUT
/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */
#define TX_TIMEOUT (400*HZ/1000)
-#ifdef HAVE_TX_TIMEOUT
static int timeout = TX_TIMEOUT;
module_param(timeout, int, 0);
-#endif
/*
* This structure is private to each device. It is used to pass
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 3cf56d90d859..8f6e816a7395 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1271,7 +1271,7 @@ int mlx4_restart_one(struct pci_dev *pdev)
return __mlx4_init_one(pdev, NULL);
}
-static struct pci_device_id mlx4_pci_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
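
This hunk is the first of several identical conversions in this section (myri10ge, natsemi, ne2k-pci, netxen and niu follow): the open-coded pci_device_id array becomes DEFINE_PCI_DEVICE_TABLE(), which at this point in the tree expands to a const, __devinitconst-annotated table. A minimal sketch of the resulting pattern for a hypothetical driver (the device ID is a placeholder):

#include <linux/pci.h>
#include <linux/module.h>

/* The macro supplies "const struct pci_device_id ...[] __devinitconst",
 * so the ID table is treated like other init-time constant data. */
static DEFINE_PCI_DEVICE_TABLE(foo_pci_tbl) = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },    /* placeholder ID */
        { 0, }
};
MODULE_DEVICE_TABLE(pci, foo_pci_tbl);
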
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index af67af55efe7..e24072a9a979 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -55,7 +55,6 @@
#include <linux/types.h>
#include <linux/inet_lro.h>
#include <asm/system.h>
-#include <linux/list.h>
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";
@@ -1697,7 +1696,7 @@ static u32 uc_addr_filter_mask(struct net_device *dev)
return 0;
nibbles = 1 << (dev->dev_addr[5] & 0x0f);
- list_for_each_entry(ha, &dev->uc.list, list) {
+ netdev_for_each_uc_addr(ha, dev) {
if (memcmp(dev->dev_addr, ha->addr, 5))
return 0;
if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 3fcb1c356e0d..c0884a9cba3c 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -4085,7 +4085,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009
-static struct pci_device_id myri10ge_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(myri10ge_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
{PCI_DEVICE
(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index b3513ad3b703..8b4313085359 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -716,10 +716,10 @@ static int myri_header(struct sk_buff *skb, struct net_device *dev,
pad[0] = MYRI_PAD_LEN;
pad[1] = 0xab;
- /* Set the protocol type. For a packet of type ETH_P_802_3 we put the length
- * in here instead. It is up to the 802.2 layer to carry protocol information.
+ /* Set the protocol type. For a packet of type ETH_P_802_3/2 we put the
+ * length in here instead.
*/
- if (type != ETH_P_802_3)
+ if (type != ETH_P_802_3 && type != ETH_P_802_2)
eth->h_proto = htons(type);
else
eth->h_proto = htons(len);
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 797fe164ce27..c64e5b0d3596 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -247,7 +247,7 @@ static struct {
{ "NatSemi DP8381[56]", 0, 24 },
};
-static struct pci_device_id natsemi_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(natsemi_pci_tbl) = {
{ PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 },
{ PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ } /* terminate list */
@@ -2488,7 +2488,7 @@ static void __set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
rx_mode = RxFilterEnable | AcceptBroadcast
| AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
rx_mode = RxFilterEnable | AcceptBroadcast
| AcceptAllMulticast | AcceptMyPhys;
@@ -2496,7 +2496,7 @@ static void __set_rx_mode(struct net_device *dev)
struct dev_mc_list *mclist;
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int b = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff;
mc_filter[b/8] |= (1 << (b & 0x07));
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 3fcebb70151c..85aec4f10131 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -136,7 +136,7 @@ static struct {
};
-static struct pci_device_id ne2k_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ne2k_pci_tbl) = {
{ 0x10ec, 0x8029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_RealTek_RTL_8029 },
{ 0x1050, 0x0940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940 },
{ 0x11f6, 0x1401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Compex_RL2000 },
diff --git a/drivers/net/netxen/Makefile b/drivers/net/netxen/Makefile
index 11d94e2434e4..861a0590b1f4 100644
--- a/drivers/net/netxen/Makefile
+++ b/drivers/net/netxen/Makefile
@@ -18,7 +18,7 @@
# MA 02111-1307, USA.
#
# The full GNU General Public License is included in this distribution
-# in the file called LICENSE.
+# in the file called "COPYING".
#
#
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 9bc5bd1d538a..144d2e880422 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
@@ -420,7 +420,7 @@ struct status_desc {
} __attribute__ ((aligned(16)));
/* UNIFIED ROMIMAGE *************************/
-#define NX_UNI_FW_MIN_SIZE 0x3eb000
+#define NX_UNI_FW_MIN_SIZE 0xc8000
#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0
#define NX_UNI_DIR_SECT_BOOTLD 0x6
#define NX_UNI_DIR_SECT_FW 0x7
@@ -1427,8 +1427,8 @@ static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
}
-int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac);
-int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac);
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac);
+int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac);
extern void netxen_change_ringparam(struct netxen_adapter *adapter);
extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
int *valp);
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 9cb8f6878047..2a8ef5fc9663 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 542f408333ff..f8499e56cbee 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index d138fc22927a..622e4c8be937 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
@@ -969,7 +969,8 @@ enum {
#define NX_DEV_READY 3
#define NX_DEV_NEED_RESET 4
#define NX_DEV_NEED_QUISCENT 5
-#define NX_DEV_FAILED 6
+#define NX_DEV_NEED_AER 6
+#define NX_DEV_FAILED 7
#define NX_RCODE_DRIVER_INFO 0x20000000
#define NX_RCODE_DRIVER_CAN_RELOAD 0x40000000
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 85e28e60ecf1..25f4414cc33e 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
@@ -554,7 +554,7 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
return;
}
- if (netdev->mc_count == 0) {
+ if (netdev_mc_empty(netdev)) {
adapter->set_promisc(adapter,
NETXEN_NIU_NON_PROMISC_MODE);
netxen_nic_disable_mcast_filter(adapter);
@@ -563,7 +563,7 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE);
if (netdev->flags & IFF_ALLMULTI ||
- netdev->mc_count > adapter->max_mc_count) {
+ netdev_mc_count(netdev) > adapter->max_mc_count) {
netxen_nic_disable_mcast_filter(adapter);
return;
}
@@ -573,7 +573,7 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next, index++)
netxen_nic_set_mcast_addr(adapter, index, mc_ptr->dmi_addr);
- if (index != netdev->mc_count)
+ if (index != netdev_mc_count(netdev))
printk(KERN_WARNING "%s: %s multicast address count mismatch\n",
netxen_nic_driver_name, netdev->name);
@@ -704,12 +704,12 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
}
if ((netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > adapter->max_mc_count)) {
+ (netdev_mc_count(netdev) > adapter->max_mc_count)) {
mode = VPORT_MISS_MODE_ACCEPT_MULTI;
goto send_fw_cmd;
}
- if (netdev->mc_count > 0) {
+ if (!netdev_mc_empty(netdev)) {
for (mc_ptr = netdev->mc_list; mc_ptr;
mc_ptr = mc_ptr->next) {
nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, &del_list);
@@ -777,17 +777,20 @@ int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
{
nx_nic_req_t req;
- u64 word;
- int rv;
+ u64 word[6];
+ int rv, i;
memset(&req, 0, sizeof(nx_nic_req_t));
+ memset(word, 0, sizeof(word));
req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
- word = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
- req.req_hdr = cpu_to_le64(word);
+ word[0] = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word[0]);
- memcpy(&req.words[0], &adapter->coal, sizeof(adapter->coal));
+ memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
+ for (i = 0; i < 6; i++)
+ req.words[i] = cpu_to_le64(word[i]);
rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
if (rv != 0) {
@@ -1033,7 +1036,7 @@ static int netxen_get_flash_block(struct netxen_adapter *adapter, int base,
return 0;
}
-int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac)
{
__le32 *pmac = (__le32 *) mac;
u32 offset;
@@ -1058,7 +1061,7 @@ int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
return 0;
}
-int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
+int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac)
{
uint32_t crbaddr, mac_hi, mac_lo;
int pci_func = adapter->ahw.pci_func;
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h
index 3fd1dcb3583a..e2c5b6f2df03 100644
--- a/drivers/net/netxen/netxen_nic_hw.h
+++ b/drivers/net/netxen/netxen_nic_hw.h
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 64cff68d372c..1c63610ead42 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
@@ -780,6 +780,9 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 1;
+ if (adapter->need_fw_reset)
+ return 1;
+
/* last attempt had failed */
if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
return 1;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 24279e6e55f5..08780ef1c1f8 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
@@ -35,6 +35,7 @@
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
+#include <linux/aer.h>
MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
MODULE_LICENSE("GPL");
@@ -84,6 +85,7 @@ static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
static void netxen_create_diag_entries(struct netxen_adapter *adapter);
static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
+static int nx_dev_request_aer(struct netxen_adapter *adapter);
static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
static int netxen_can_start_firmware(struct netxen_adapter *adapter);
@@ -98,7 +100,7 @@ static void netxen_config_indev_addr(struct net_device *dev, unsigned long);
{PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
-static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(netxen_pci_tbl) = {
ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
@@ -430,7 +432,7 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
{
int i;
unsigned char *p;
- __le64 mac_addr;
+ u64 mac_addr;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
@@ -1262,6 +1264,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
goto err_out_disable_pdev;
+ if (NX_IS_REVISION_P3(pdev->revision))
+ pci_enable_pcie_error_reporting(pdev);
+
pci_set_master(pdev);
netdev = alloc_etherdev(sizeof(struct netxen_adapter));
@@ -1409,17 +1414,19 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
netxen_release_firmware(adapter);
+ if (NX_IS_REVISION_P3(pdev->revision))
+ pci_disable_pcie_error_reporting(pdev);
+
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
-static int __netxen_nic_shutdown(struct pci_dev *pdev)
+
+static void netxen_nic_detach_func(struct netxen_adapter *adapter)
{
- struct netxen_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
- int retval;
netif_device_detach(netdev);
@@ -1438,53 +1445,22 @@ static int __netxen_nic_shutdown(struct pci_dev *pdev)
nx_decr_dev_ref_cnt(adapter);
clear_bit(__NX_RESETTING, &adapter->state);
-
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
- if (netxen_nic_wol_supported(adapter)) {
- pci_enable_wake(pdev, PCI_D3cold, 1);
- pci_enable_wake(pdev, PCI_D3hot, 1);
- }
-
- pci_disable_device(pdev);
-
- return 0;
}
-static void netxen_nic_shutdown(struct pci_dev *pdev)
-{
- if (__netxen_nic_shutdown(pdev))
- return;
-}
-#ifdef CONFIG_PM
-static int
-netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- int retval;
-
- retval = __netxen_nic_shutdown(pdev);
- if (retval)
- return retval;
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
- return 0;
-}
-
-static int
-netxen_nic_resume(struct pci_dev *pdev)
+static int netxen_nic_attach_func(struct pci_dev *pdev)
{
struct netxen_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
int err;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
err = pci_enable_device(pdev);
if (err)
return err;
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
adapter->ahw.crb_win = -1;
adapter->ahw.ocm_win = -1;
@@ -1503,11 +1479,10 @@ netxen_nic_resume(struct pci_dev *pdev)
if (err)
goto err_out_detach;
- netif_device_attach(netdev);
-
netxen_config_indev_addr(netdev, NETDEV_UP);
}
+ netif_device_attach(netdev);
netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
return 0;
@@ -1517,6 +1492,85 @@ err_out:
nx_decr_dev_ref_cnt(adapter);
return err;
}
+
+static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (nx_dev_request_aer(adapter))
+ return PCI_ERS_RESULT_RECOVERED;
+
+ netxen_nic_detach_func(adapter);
+
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev)
+{
+ int err = 0;
+
+ err = netxen_nic_attach_func(pdev);
+
+ return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+static void netxen_io_resume(struct pci_dev *pdev)
+{
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+}
+
+static void netxen_nic_shutdown(struct pci_dev *pdev)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+
+ netxen_nic_detach_func(adapter);
+
+ if (pci_save_state(pdev))
+ return;
+
+ if (netxen_nic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
+
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int
+netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+ int retval;
+
+ netxen_nic_detach_func(adapter);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ if (netxen_nic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int
+netxen_nic_resume(struct pci_dev *pdev)
+{
+ return netxen_nic_attach_func(pdev);
+}
#endif
static int netxen_nic_open(struct net_device *netdev)
@@ -2104,20 +2158,49 @@ nx_decr_dev_ref_cnt(struct netxen_adapter *adapter)
return count;
}
-static void
+static int
+nx_dev_request_aer(struct netxen_adapter *adapter)
+{
+ u32 state;
+ int ret = -EINVAL;
+
+ if (netxen_api_lock(adapter))
+ return ret;
+
+ state = NXRD32(adapter, NX_CRB_DEV_STATE);
+
+ if (state == NX_DEV_NEED_AER)
+ ret = 0;
+ else if (state == NX_DEV_READY) {
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER);
+ ret = 0;
+ }
+
+ netxen_api_unlock(adapter);
+ return ret;
+}
+
+static int
nx_dev_request_reset(struct netxen_adapter *adapter)
{
u32 state;
+ int ret = -EINVAL;
if (netxen_api_lock(adapter))
- return;
+ return ret;
state = NXRD32(adapter, NX_CRB_DEV_STATE);
- if (state != NX_DEV_INITALIZING)
+ if (state == NX_DEV_NEED_RESET)
+ ret = 0;
+ else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) {
NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET);
+ ret = 0;
+ }
netxen_api_unlock(adapter);
+
+ return ret;
}
static int
@@ -2271,17 +2354,29 @@ netxen_check_health(struct netxen_adapter *adapter)
u32 state, heartbit;
struct net_device *netdev = adapter->netdev;
+ state = NXRD32(adapter, NX_CRB_DEV_STATE);
+ if (state == NX_DEV_NEED_AER)
+ return 0;
+
if (netxen_nic_check_temp(adapter))
goto detach;
if (adapter->need_fw_reset) {
- nx_dev_request_reset(adapter);
+ if (nx_dev_request_reset(adapter))
+ return 0;
goto detach;
}
- state = NXRD32(adapter, NX_CRB_DEV_STATE);
- if (state == NX_DEV_NEED_RESET)
- goto detach;
+ /* NX_DEV_NEED_RESET, this state can be marked in two cases
+ * 1. Tx timeout 2. Fw hang
+ * Send the request to destroy the context in case of tx timeout only;
+ * it is not required in case of a Fw hang
+ */
+ if (state == NX_DEV_NEED_RESET) {
+ adapter->need_fw_reset = 1;
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ goto detach;
+ }
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 0;
@@ -2290,12 +2385,17 @@ netxen_check_health(struct netxen_adapter *adapter)
if (heartbit != adapter->heartbit) {
adapter->heartbit = heartbit;
adapter->fw_fail_cnt = 0;
+ if (adapter->need_fw_reset)
+ goto detach;
return 0;
}
if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
return 0;
+ if (nx_dev_request_reset(adapter))
+ return 0;
+
clear_bit(__NX_FW_ATTACHED, &adapter->state);
dev_info(&netdev->dev, "firmware hang detected\n");
@@ -2498,7 +2598,7 @@ netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
return size;
}
-ssize_t netxen_sysfs_write_mem(struct kobject *kobj,
+static ssize_t netxen_sysfs_write_mem(struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
@@ -2725,6 +2825,12 @@ netxen_config_indev_addr(struct net_device *dev, unsigned long event)
{ }
#endif
+static struct pci_error_handlers netxen_err_handler = {
+ .error_detected = netxen_io_error_detected,
+ .slot_reset = netxen_io_slot_reset,
+ .resume = netxen_io_resume,
+};
+
static struct pci_driver netxen_driver = {
.name = netxen_nic_driver_name,
.id_table = netxen_pci_tbl,
@@ -2734,7 +2840,8 @@ static struct pci_driver netxen_driver = {
.suspend = netxen_nic_suspend,
.resume = netxen_nic_resume,
#endif
- .shutdown = netxen_nic_shutdown
+ .shutdown = netxen_nic_shutdown,
+ .err_handler = &netxen_err_handler
};
static int __init netxen_init_module(void)
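
The netxen changes above wire the driver into the PCIe AER recovery path: error reporting is enabled at probe time, and a pci_error_handlers table lets the core detach the device on an uncorrectable error, re-attach it after the slot reset, and clear the logged status on resume. A stripped-down sketch of that wiring (the foo_* names and the quiesce/restart placeholders are illustrative):

#include <linux/pci.h>
#include <linux/aer.h>

static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
                                           pci_channel_state_t state)
{
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;       /* device is gone */

        /* quiesce the device here (detach netdev, free irqs, ...) */
        pci_disable_device(pdev);
        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
{
        /* re-enable and re-initialise the device after the link reset */
        if (pci_enable_device(pdev))
                return PCI_ERS_RESULT_DISCONNECT;
        pci_set_master(pdev);
        pci_restore_state(pdev);
        return PCI_ERS_RESULT_RECOVERED;
}

static void foo_io_resume(struct pci_dev *pdev)
{
        pci_cleanup_aer_uncorrect_error_status(pdev);
        /* restart traffic here */
}

static struct pci_error_handlers foo_err_handler = {
        .error_detected = foo_error_detected,
        .slot_reset     = foo_slot_reset,
        .resume         = foo_io_resume,
};
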
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index b42f5e522f90..497c6d514a68 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -597,7 +597,7 @@ static int init586(struct net_device *dev)
struct tdr_cmd_struct __iomem *tdr_cmd;
struct mcsetup_cmd_struct __iomem *mc_cmd;
struct dev_mc_list *dmi = dev->mc_list;
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
ptr = p->scb + 1;
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index ae19aafd2c7e..9225c76cac40 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -849,7 +849,7 @@ static int ni65_lance_reinit(struct net_device *dev)
if(dev->flags & IFF_PROMISC)
ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
- else if(dev->mc_count || dev->flags & IFF_ALLMULTI)
+ else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI)
ni65_init_lance(p,dev->dev_addr,0xff,0x0);
else
ni65_init_lance(p,dev->dev_addr,0x00,0x00);
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 2aed2b382c40..5e604e305d95 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3,6 +3,8 @@
* Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
@@ -33,7 +35,6 @@
#include "niu.h"
#define DRV_MODULE_NAME "niu"
-#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.0"
#define DRV_MODULE_RELDATE "Nov 14, 2008"
@@ -58,7 +59,7 @@ static void writeq(u64 val, void __iomem *reg)
}
#endif
-static struct pci_device_id niu_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
{}
};
@@ -89,21 +90,6 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");
-#define niudbg(TYPE, f, a...) \
-do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_DEBUG PFX f, ## a); \
-} while (0)
-
-#define niuinfo(TYPE, f, a...) \
-do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_INFO PFX f, ## a); \
-} while (0)
-
-#define niuwarn(TYPE, f, a...) \
-do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_WARNING PFX f, ## a); \
-} while (0)
-
#define niu_lock_parent(np, flags) \
spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
@@ -135,10 +121,9 @@ static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
nw64_mac(reg, bits);
err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
if (err)
- dev_err(np->device, PFX "%s: bits (%llx) of register %s "
- "would not clear, val[%llx]\n",
- np->dev->name, (unsigned long long) bits, reg_name,
- (unsigned long long) nr64_mac(reg));
+ netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
+ (unsigned long long)bits, reg_name,
+ (unsigned long long)nr64_mac(reg));
return err;
}
@@ -175,10 +160,9 @@ static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
if (err)
- dev_err(np->device, PFX "%s: bits (%llx) of register %s "
- "would not clear, val[%llx]\n",
- np->dev->name, (unsigned long long) bits, reg_name,
- (unsigned long long) nr64_ipp(reg));
+ netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
+ (unsigned long long)bits, reg_name,
+ (unsigned long long)nr64_ipp(reg));
return err;
}
@@ -216,10 +200,9 @@ static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
nw64(reg, bits);
err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
if (err)
- dev_err(np->device, PFX "%s: bits (%llx) of register %s "
- "would not clear, val[%llx]\n",
- np->dev->name, (unsigned long long) bits, reg_name,
- (unsigned long long) nr64(reg));
+ netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
+ (unsigned long long)bits, reg_name,
+ (unsigned long long)nr64(reg));
return err;
}
@@ -475,9 +458,8 @@ static int serdes_init_niu_1g_serdes(struct niu *np)
err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
ESR2_TI_PLL_CFG_L, pll_cfg);
if (err) {
- dev_err(np->device, PFX "NIU Port %d "
- "serdes_init_niu_1g_serdes: "
- "mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
+ netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
+ np->port, __func__);
return err;
}
@@ -486,9 +468,8 @@ static int serdes_init_niu_1g_serdes(struct niu *np)
err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
ESR2_TI_PLL_STS_L, pll_sts);
if (err) {
- dev_err(np->device, PFX "NIU Port %d "
- "serdes_init_niu_1g_serdes: "
- "mdio write to ESR2_TI_PLL_STS_L failed", np->port);
+ netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
+ np->port, __func__);
return err;
}
@@ -531,8 +512,8 @@ static int serdes_init_niu_1g_serdes(struct niu *np)
}
if ((sig & mask) != val) {
- dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
- "[%08x]\n", np->port, (int) (sig & mask), (int) val);
+ netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
+ np->port, (int)(sig & mask), (int)val);
return -ENODEV;
}
@@ -569,9 +550,8 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
if (err) {
- dev_err(np->device, PFX "NIU Port %d "
- "serdes_init_niu_10g_serdes: "
- "mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
+ netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
+ np->port, __func__);
return err;
}
@@ -580,9 +560,8 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
if (err) {
- dev_err(np->device, PFX "NIU Port %d "
- "serdes_init_niu_10g_serdes: "
- "mdio write to ESR2_TI_PLL_STS_L failed", np->port);
+ netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
+ np->port, __func__);
return err;
}
@@ -639,9 +618,8 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
}
if ((sig & mask) != val) {
- pr_info(PFX "NIU Port %u signal bits [%08x] are not "
- "[%08x] for 10G...trying 1G\n",
- np->port, (int) (sig & mask), (int) val);
+ pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
+ np->port, (int)(sig & mask), (int)val);
/* 10G failed, try initializing at 1G */
err = serdes_init_niu_1g_serdes(np);
@@ -649,8 +627,8 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
np->flags &= ~NIU_FLAGS_10G;
np->mac_xcvr = MAC_XCVR_PCS;
} else {
- dev_err(np->device, PFX "Port %u 10G/1G SERDES "
- "Link Failed \n", np->port);
+ netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
+ np->port);
return -ENODEV;
}
}
@@ -764,9 +742,8 @@ static int esr_reset(struct niu *np)
if (err)
return err;
if (reset != 0) {
- dev_err(np->device, PFX "Port %u ESR_RESET "
- "did not clear [%08x]\n",
- np->port, reset);
+ netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
+ np->port, reset);
return -ENODEV;
}
@@ -890,8 +867,8 @@ static int serdes_init_10g(struct niu *np)
np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
return 0;
}
- dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
- "[%08x]\n", np->port, (int) (sig & mask), (int) val);
+ netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
+ np->port, (int)(sig & mask), (int)val);
return -ENODEV;
}
if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
@@ -1039,8 +1016,8 @@ static int serdes_init_1g_serdes(struct niu *np)
}
if ((sig & mask) != val) {
- dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
- "[%08x]\n", np->port, (int) (sig & mask), (int) val);
+ netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
+ np->port, (int)(sig & mask), (int)val);
return -ENODEV;
}
@@ -1332,8 +1309,8 @@ static int bcm8704_reset(struct niu *np)
break;
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u PHY will not reset "
- "(bmcr=%04x)\n", np->port, (err & 0xffff));
+ netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
+ np->port, (err & 0xffff));
return -ENODEV;
}
return 0;
@@ -1515,21 +1492,18 @@ static int xcvr_diag_bcm870x(struct niu *np)
MII_STAT1000);
if (err < 0)
return err;
- pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n",
- np->port, err);
+ pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);
err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
if (err < 0)
return err;
- pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n",
- np->port, err);
+ pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);
err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
MII_NWAYTEST);
if (err < 0)
return err;
- pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n",
- np->port, err);
+ pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
#endif
/* XXX dig this out it might not be so useful XXX */
@@ -1555,11 +1529,11 @@ static int xcvr_diag_bcm870x(struct niu *np)
if (analog_stat0 != 0x03fc) {
if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
- pr_info(PFX "Port %u cable not connected "
- "or bad cable.\n", np->port);
+ pr_info("Port %u cable not connected or bad cable\n",
+ np->port);
} else if (analog_stat0 == 0x639c) {
- pr_info(PFX "Port %u optical module is bad "
- "or missing.\n", np->port);
+ pr_info("Port %u optical module is bad or missing\n",
+ np->port);
}
}
@@ -1699,8 +1673,8 @@ static int mii_reset(struct niu *np)
break;
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u MII would not reset, "
- "bmcr[%04x]\n", np->port, err);
+ netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
+ np->port, err);
return -ENODEV;
}
@@ -1895,7 +1869,7 @@ static int mii_init_common(struct niu *np)
return err;
bmsr = err;
- pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
+ pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
np->port, bmcr, bmsr);
#endif
@@ -1948,16 +1922,12 @@ static int niu_link_status_common(struct niu *np, int link_up)
unsigned long flags;
if (!netif_carrier_ok(dev) && link_up) {
- niuinfo(LINK, "%s: Link is up at %s, %s duplex\n",
- dev->name,
- (lp->active_speed == SPEED_10000 ?
- "10Gb/sec" :
- (lp->active_speed == SPEED_1000 ?
- "1Gb/sec" :
- (lp->active_speed == SPEED_100 ?
- "100Mbit/sec" : "10Mbit/sec"))),
- (lp->active_duplex == DUPLEX_FULL ?
- "full" : "half"));
+ netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
+ lp->active_speed == SPEED_10000 ? "10Gb/sec" :
+ lp->active_speed == SPEED_1000 ? "1Gb/sec" :
+ lp->active_speed == SPEED_100 ? "100Mbit/sec" :
+ "10Mbit/sec",
+ lp->active_duplex == DUPLEX_FULL ? "full" : "half");
spin_lock_irqsave(&np->lock, flags);
niu_init_xif(np);
@@ -1966,7 +1936,7 @@ static int niu_link_status_common(struct niu *np, int link_up)
netif_carrier_on(dev);
} else if (netif_carrier_ok(dev) && !link_up) {
- niuwarn(LINK, "%s: Link is down\n", dev->name);
+ netif_warn(np, link, dev, "Link is down\n");
spin_lock_irqsave(&np->lock, flags);
niu_handle_led(np, 0);
spin_unlock_irqrestore(&np->lock, flags);
@@ -2232,8 +2202,8 @@ static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
} else {
np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
*link_up_p = 0;
- niuwarn(LINK, "%s: Hotplug PHY Removed\n",
- np->dev->name);
+ netif_warn(np, link, np->dev,
+ "Hotplug PHY Removed\n");
}
}
out:
@@ -2531,8 +2501,8 @@ static int serdes_init_10g_serdes(struct niu *np)
np->flags &= ~NIU_FLAGS_10G;
np->mac_xcvr = MAC_XCVR_PCS;
} else {
- dev_err(np->device, PFX "Port %u 10G/1G SERDES Link Failed \n",
- np->port);
+ netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
+ np->port);
return -ENODEV;
}
}
@@ -3234,23 +3204,22 @@ static int fflp_early_init(struct niu *np)
parent = np->parent;
err = 0;
if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
- niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n",
- np->port);
if (np->parent->plat_type != PLAT_TYPE_NIU) {
fflp_reset(np);
fflp_set_timings(np);
err = fflp_disable_all_partitions(np);
if (err) {
- niudbg(PROBE, "fflp_disable_all_partitions "
- "failed, err=%d\n", err);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "fflp_disable_all_partitions failed, err=%d\n",
+ err);
goto out;
}
}
err = tcam_early_init(np);
if (err) {
- niudbg(PROBE, "tcam_early_init failed, err=%d\n",
- err);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "tcam_early_init failed, err=%d\n", err);
goto out;
}
fflp_llcsnap_enable(np, 1);
@@ -3260,22 +3229,22 @@ static int fflp_early_init(struct niu *np)
err = tcam_flush_all(np);
if (err) {
- niudbg(PROBE, "tcam_flush_all failed, err=%d\n",
- err);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "tcam_flush_all failed, err=%d\n", err);
goto out;
}
if (np->parent->plat_type != PLAT_TYPE_NIU) {
err = fflp_hash_clear(np);
if (err) {
- niudbg(PROBE, "fflp_hash_clear failed, "
- "err=%d\n", err);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "fflp_hash_clear failed, err=%d\n",
+ err);
goto out;
}
}
vlan_tbl_clear(np);
- niudbg(PROBE, "fflp_early_init: Success\n");
parent->flags |= PARENT_FLGS_CLS_HWINIT;
}
out:
@@ -3665,8 +3634,8 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
cons = rp->cons;
- niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n",
- np->dev->name, pkt_cnt, cons);
+ netif_printk(np, tx_done, KERN_DEBUG, np->dev,
+ "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
while (pkt_cnt--)
cons = release_tx_packet(np, rp, cons);
@@ -3714,11 +3683,12 @@ static inline void niu_sync_rx_discard_stats(struct niu *np,
rp->rx_errors += misc & RXMISC_COUNT;
if (unlikely(misc & RXMISC_OFLOW))
- dev_err(np->device, "rx-%d: Counter overflow "
- "RXMISC discard\n", rx_channel);
+ dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
+ rx_channel);
- niudbg(RX_ERR, "%s-rx-%d: MISC drop=%u over=%u\n",
- np->dev->name, rx_channel, misc, misc-limit);
+ netif_printk(np, rx_err, KERN_DEBUG, np->dev,
+ "rx-%d: MISC drop=%u over=%u\n",
+ rx_channel, misc, misc-limit);
}
/* WRED (Weighted Random Early Discard) by hardware */
@@ -3728,11 +3698,11 @@ static inline void niu_sync_rx_discard_stats(struct niu *np,
rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
if (unlikely(wred & RED_DIS_CNT_OFLOW))
- dev_err(np->device, "rx-%d: Counter overflow "
- "WRED discard\n", rx_channel);
+ dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
- niudbg(RX_ERR, "%s-rx-%d: WRED drop=%u over=%u\n",
- np->dev->name, rx_channel, wred, wred-limit);
+ netif_printk(np, rx_err, KERN_DEBUG, np->dev,
+ "rx-%d: WRED drop=%u over=%u\n",
+ rx_channel, wred, wred-limit);
}
}
@@ -3753,8 +3723,9 @@ static int niu_rx_work(struct napi_struct *napi, struct niu *np,
mbox->rx_dma_ctl_stat = 0;
mbox->rcrstat_a = 0;
- niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n",
- np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen);
+ netif_printk(np, rx_status, KERN_DEBUG, np->dev,
+ "%s(chan[%d]), stat[%llx] qlen=%d\n",
+ __func__, rp->rx_channel, (unsigned long long)stat, qlen);
rcr_done = work_done = 0;
qlen = min(qlen, budget);
@@ -3791,8 +3762,8 @@ static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
u32 rx_vec = (v0 & 0xffffffff);
int i, work_done = 0;
- niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n",
- np->dev->name, (unsigned long long) v0);
+ netif_printk(np, intr, KERN_DEBUG, np->dev,
+ "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);
for (i = 0; i < np->num_tx_rings; i++) {
struct tx_ring_info *rp = &np->tx_rings[i];
@@ -3837,39 +3808,38 @@ static int niu_poll(struct napi_struct *napi, int budget)
static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
u64 stat)
{
- dev_err(np->device, PFX "%s: RX channel %u errors ( ",
- np->dev->name, rp->rx_channel);
+ netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
- printk("RBR_TMOUT ");
+ pr_cont("RBR_TMOUT ");
if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
- printk("RSP_CNT ");
+ pr_cont("RSP_CNT ");
if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
- printk("BYTE_EN_BUS ");
+ pr_cont("BYTE_EN_BUS ");
if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
- printk("RSP_DAT ");
+ pr_cont("RSP_DAT ");
if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
- printk("RCR_ACK ");
+ pr_cont("RCR_ACK ");
if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
- printk("RCR_SHA_PAR ");
+ pr_cont("RCR_SHA_PAR ");
if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
- printk("RBR_PRE_PAR ");
+ pr_cont("RBR_PRE_PAR ");
if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
- printk("CONFIG ");
+ pr_cont("CONFIG ");
if (stat & RX_DMA_CTL_STAT_RCRINCON)
- printk("RCRINCON ");
+ pr_cont("RCRINCON ");
if (stat & RX_DMA_CTL_STAT_RCRFULL)
- printk("RCRFULL ");
+ pr_cont("RCRFULL ");
if (stat & RX_DMA_CTL_STAT_RBRFULL)
- printk("RBRFULL ");
+ pr_cont("RBRFULL ");
if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
- printk("RBRLOGPAGE ");
+ pr_cont("RBRLOGPAGE ");
if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
- printk("CFIGLOGPAGE ");
+ pr_cont("CFIGLOGPAGE ");
if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
- printk("DC_FIDO ");
+ pr_cont("DC_FIDO ");
- printk(")\n");
+ pr_cont(")\n");
}
static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
@@ -3883,9 +3853,9 @@ static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
err = -EINVAL;
if (err) {
- dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n",
- np->dev->name, rp->rx_channel,
- (unsigned long long) stat);
+ netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
+ rp->rx_channel,
+ (unsigned long long) stat);
niu_log_rxchan_errors(np, rp, stat);
}
@@ -3899,27 +3869,26 @@ static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
u64 cs)
{
- dev_err(np->device, PFX "%s: TX channel %u errors ( ",
- np->dev->name, rp->tx_channel);
+ netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
if (cs & TX_CS_MBOX_ERR)
- printk("MBOX ");
+ pr_cont("MBOX ");
if (cs & TX_CS_PKT_SIZE_ERR)
- printk("PKT_SIZE ");
+ pr_cont("PKT_SIZE ");
if (cs & TX_CS_TX_RING_OFLOW)
- printk("TX_RING_OFLOW ");
+ pr_cont("TX_RING_OFLOW ");
if (cs & TX_CS_PREF_BUF_PAR_ERR)
- printk("PREF_BUF_PAR ");
+ pr_cont("PREF_BUF_PAR ");
if (cs & TX_CS_NACK_PREF)
- printk("NACK_PREF ");
+ pr_cont("NACK_PREF ");
if (cs & TX_CS_NACK_PKT_RD)
- printk("NACK_PKT_RD ");
+ pr_cont("NACK_PKT_RD ");
if (cs & TX_CS_CONF_PART_ERR)
- printk("CONF_PART ");
+ pr_cont("CONF_PART ");
if (cs & TX_CS_PKT_PRT_ERR)
- printk("PKT_PTR ");
+ pr_cont("PKT_PTR ");
- printk(")\n");
+ pr_cont(")\n");
}
static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
@@ -3930,12 +3899,11 @@ static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
- dev_err(np->device, PFX "%s: TX channel %u error, "
- "cs[%llx] logh[%llx] logl[%llx]\n",
- np->dev->name, rp->tx_channel,
- (unsigned long long) cs,
- (unsigned long long) logh,
- (unsigned long long) logl);
+ netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
+ rp->tx_channel,
+ (unsigned long long)cs,
+ (unsigned long long)logh,
+ (unsigned long long)logl);
niu_log_txchan_errors(np, rp, cs);
@@ -3954,9 +3922,8 @@ static int niu_mif_interrupt(struct niu *np)
phy_mdint = 1;
}
- dev_err(np->device, PFX "%s: MIF interrupt, "
- "stat[%llx] phy_mdint(%d)\n",
- np->dev->name, (unsigned long long) mif_status, phy_mdint);
+ netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
+ (unsigned long long)mif_status, phy_mdint);
return -ENODEV;
}
@@ -4081,41 +4048,40 @@ static int niu_mac_interrupt(struct niu *np)
static void niu_log_device_error(struct niu *np, u64 stat)
{
- dev_err(np->device, PFX "%s: Core device errors ( ",
- np->dev->name);
+ netdev_err(np->dev, "Core device errors ( ");
if (stat & SYS_ERR_MASK_META2)
- printk("META2 ");
+ pr_cont("META2 ");
if (stat & SYS_ERR_MASK_META1)
- printk("META1 ");
+ pr_cont("META1 ");
if (stat & SYS_ERR_MASK_PEU)
- printk("PEU ");
+ pr_cont("PEU ");
if (stat & SYS_ERR_MASK_TXC)
- printk("TXC ");
+ pr_cont("TXC ");
if (stat & SYS_ERR_MASK_RDMC)
- printk("RDMC ");
+ pr_cont("RDMC ");
if (stat & SYS_ERR_MASK_TDMC)
- printk("TDMC ");
+ pr_cont("TDMC ");
if (stat & SYS_ERR_MASK_ZCP)
- printk("ZCP ");
+ pr_cont("ZCP ");
if (stat & SYS_ERR_MASK_FFLP)
- printk("FFLP ");
+ pr_cont("FFLP ");
if (stat & SYS_ERR_MASK_IPP)
- printk("IPP ");
+ pr_cont("IPP ");
if (stat & SYS_ERR_MASK_MAC)
- printk("MAC ");
+ pr_cont("MAC ");
if (stat & SYS_ERR_MASK_SMX)
- printk("SMX ");
+ pr_cont("SMX ");
- printk(")\n");
+ pr_cont(")\n");
}
static int niu_device_error(struct niu *np)
{
u64 stat = nr64(SYS_ERR_STAT);
- dev_err(np->device, PFX "%s: Core device error, stat[%llx]\n",
- np->dev->name, (unsigned long long) stat);
+ netdev_err(np->dev, "Core device error, stat[%llx]\n",
+ (unsigned long long)stat);
niu_log_device_error(np, stat);
@@ -4197,8 +4163,8 @@ static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
RX_DMA_CTL_STAT_RCRTO);
nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
- niudbg(INTR, "%s: rxchan_intr stat[%llx]\n",
- np->dev->name, (unsigned long long) stat);
+ netif_printk(np, intr, KERN_DEBUG, np->dev,
+ "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
}
static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
@@ -4206,8 +4172,8 @@ static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
{
rp->tx_cs = nr64(TX_CS(rp->tx_channel));
- niudbg(INTR, "%s: txchan_intr cs[%llx]\n",
- np->dev->name, (unsigned long long) rp->tx_cs);
+ netif_printk(np, intr, KERN_DEBUG, np->dev,
+ "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
}
static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
@@ -4265,8 +4231,8 @@ static irqreturn_t niu_interrupt(int irq, void *dev_id)
u64 v0, v1, v2;
if (netif_msg_intr(np))
- printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ",
- lp, ldg);
+ printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
+ __func__, lp, ldg);
spin_lock_irqsave(&np->lock, flags);
@@ -4275,7 +4241,7 @@ static irqreturn_t niu_interrupt(int irq, void *dev_id)
v2 = nr64(LDSV2(ldg));
if (netif_msg_intr(np))
- printk("v0[%llx] v1[%llx] v2[%llx]\n",
+ pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
(unsigned long long) v0,
(unsigned long long) v1,
(unsigned long long) v2);
@@ -4400,8 +4366,8 @@ static int niu_alloc_rx_ring_info(struct niu *np,
if (!rp->mbox)
return -ENOMEM;
if ((unsigned long)rp->mbox & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "RXDMA mailbox %p\n", np->dev->name, rp->mbox);
+ netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
+ rp->mbox);
return -EINVAL;
}
@@ -4411,8 +4377,8 @@ static int niu_alloc_rx_ring_info(struct niu *np,
if (!rp->rcr)
return -ENOMEM;
if ((unsigned long)rp->rcr & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "RXDMA RCR table %p\n", np->dev->name, rp->rcr);
+ netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
+ rp->rcr);
return -EINVAL;
}
rp->rcr_table_size = MAX_RCR_RING_SIZE;
@@ -4424,8 +4390,8 @@ static int niu_alloc_rx_ring_info(struct niu *np,
if (!rp->rbr)
return -ENOMEM;
if ((unsigned long)rp->rbr & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "RXDMA RBR table %p\n", np->dev->name, rp->rbr);
+ netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
+ rp->rbr);
return -EINVAL;
}
rp->rbr_table_size = MAX_RBR_RING_SIZE;
@@ -4458,8 +4424,8 @@ static int niu_alloc_tx_ring_info(struct niu *np,
if (!rp->mbox)
return -ENOMEM;
if ((unsigned long)rp->mbox & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "TXDMA mailbox %p\n", np->dev->name, rp->mbox);
+ netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
+ rp->mbox);
return -EINVAL;
}
@@ -4469,8 +4435,8 @@ static int niu_alloc_tx_ring_info(struct niu *np,
if (!rp->descr)
return -ENOMEM;
if ((unsigned long)rp->descr & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "TXDMA descr table %p\n", np->dev->name, rp->descr);
+ netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
+ rp->descr);
return -EINVAL;
}
@@ -4726,10 +4692,8 @@ static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
TX_RNG_CFIG_STADDR)) {
- dev_err(np->device, PFX "%s: TX ring channel %d "
- "DMA addr (%llx) is not aligned.\n",
- np->dev->name, channel,
- (unsigned long long) rp->descr_dma);
+ netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
+ channel, (unsigned long long)rp->descr_dma);
return -EINVAL;
}
@@ -4746,10 +4710,8 @@ static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
- dev_err(np->device, PFX "%s: TX ring channel %d "
- "MBOX addr (%llx) is has illegal bits.\n",
- np->dev->name, channel,
- (unsigned long long) rp->mbox_dma);
+ netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
+ channel, (unsigned long long)rp->mbox_dma);
return -EINVAL;
}
nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
@@ -5146,9 +5108,8 @@ static int niu_zcp_read(struct niu *np, int index, u64 *data)
err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
1000, 100);
if (err) {
- dev_err(np->device, PFX "%s: ZCP read busy won't clear, "
- "ZCP_RAM_ACC[%llx]\n", np->dev->name,
- (unsigned long long) nr64(ZCP_RAM_ACC));
+ netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
+ (unsigned long long)nr64(ZCP_RAM_ACC));
return err;
}
@@ -5160,9 +5121,8 @@ static int niu_zcp_read(struct niu *np, int index, u64 *data)
err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
1000, 100);
if (err) {
- dev_err(np->device, PFX "%s: ZCP read busy2 won't clear, "
- "ZCP_RAM_ACC[%llx]\n", np->dev->name,
- (unsigned long long) nr64(ZCP_RAM_ACC));
+ netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
+ (unsigned long long)nr64(ZCP_RAM_ACC));
return err;
}
@@ -5527,8 +5487,7 @@ static int niu_reset_tx_bmac(struct niu *np)
udelay(100);
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u TX BMAC would not reset, "
- "BTXMAC_SW_RST[%llx]\n",
+ dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
np->port,
(unsigned long long) nr64_mac(BTXMAC_SW_RST));
return -ENODEV;
@@ -5629,12 +5588,11 @@ static int niu_reset_rx_xmac(struct niu *np)
while (--limit >= 0) {
if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
XRXMAC_SW_RST_SOFT_RST)))
- break;
+ break;
udelay(100);
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u RX XMAC would not reset, "
- "XRXMAC_SW_RST[%llx]\n",
+ dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
np->port,
(unsigned long long) nr64_mac(XRXMAC_SW_RST));
return -ENODEV;
@@ -5655,8 +5613,7 @@ static int niu_reset_rx_bmac(struct niu *np)
udelay(100);
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u RX BMAC would not reset, "
- "BRXMAC_SW_RST[%llx]\n",
+ dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
np->port,
(unsigned long long) nr64_mac(BRXMAC_SW_RST));
return -ENODEV;
@@ -5960,11 +5917,9 @@ static void niu_disable_ipp(struct niu *np)
}
if (limit < 0 &&
(rd != 0 && wr != 1)) {
- dev_err(np->device, PFX "%s: IPP would not quiesce, "
- "rd_ptr[%llx] wr_ptr[%llx]\n",
- np->dev->name,
- (unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR),
- (unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR));
+ netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
+ (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
+ (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
}
val = nr64_ipp(IPP_CFIG);
@@ -5981,12 +5936,12 @@ static int niu_init_hw(struct niu *np)
{
int i, err;
- niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
niu_txc_enable_port(np, 1);
niu_txc_port_dma_enable(np, 1);
niu_txc_set_imask(np, 0);
- niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
for (i = 0; i < np->num_tx_rings; i++) {
struct tx_ring_info *rp = &np->tx_rings[i];
@@ -5995,27 +5950,27 @@ static int niu_init_hw(struct niu *np)
return err;
}
- niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
err = niu_init_rx_channels(np);
if (err)
goto out_uninit_tx_channels;
- niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
err = niu_init_classifier_hw(np);
if (err)
goto out_uninit_rx_channels;
- niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
err = niu_init_zcp(np);
if (err)
goto out_uninit_rx_channels;
- niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
err = niu_init_ipp(np);
if (err)
goto out_uninit_rx_channels;
- niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
err = niu_init_mac(np);
if (err)
goto out_uninit_ipp;
@@ -6023,16 +5978,16 @@ static int niu_init_hw(struct niu *np)
return 0;
out_uninit_ipp:
- niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
niu_disable_ipp(np);
out_uninit_rx_channels:
- niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
niu_stop_rx_channels(np);
niu_reset_rx_channels(np);
out_uninit_tx_channels:
- niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
niu_stop_tx_channels(np);
niu_reset_tx_channels(np);
@@ -6041,25 +5996,25 @@ out_uninit_tx_channels:
static void niu_stop_hw(struct niu *np)
{
- niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
niu_enable_interrupts(np, 0);
- niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
niu_enable_rx_mac(np, 0);
- niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
niu_disable_ipp(np);
- niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
niu_stop_tx_channels(np);
- niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
niu_stop_rx_channels(np);
- niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
niu_reset_tx_channels(np);
- niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
niu_reset_rx_channels(np);
}
@@ -6369,10 +6324,10 @@ static void niu_set_rx_mode(struct net_device *dev)
np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
if (dev->flags & IFF_PROMISC)
np->flags |= NIU_FLAGS_PROMISC;
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
+ if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
np->flags |= NIU_FLAGS_MCAST;
- alt_cnt = dev->uc.count;
+ alt_cnt = netdev_uc_count(dev);
if (alt_cnt > niu_num_alt_addr(np)) {
alt_cnt = 0;
np->flags |= NIU_FLAGS_PROMISC;
@@ -6381,17 +6336,15 @@ static void niu_set_rx_mode(struct net_device *dev)
if (alt_cnt) {
int index = 0;
- list_for_each_entry(ha, &dev->uc.list, list) {
+ netdev_for_each_uc_addr(ha, dev) {
err = niu_set_alt_mac(np, index, ha->addr);
if (err)
- printk(KERN_WARNING PFX "%s: Error %d "
- "adding alt mac %d\n",
- dev->name, err, index);
+ netdev_warn(dev, "Error %d adding alt mac %d\n",
+ err, index);
err = niu_enable_alt_mac(np, index, 1);
if (err)
- printk(KERN_WARNING PFX "%s: Error %d "
- "enabling alt mac %d\n",
- dev->name, err, index);
+ netdev_warn(dev, "Error %d enabling alt mac %d\n",
+ err, index);
index++;
}
@@ -6404,15 +6357,14 @@ static void niu_set_rx_mode(struct net_device *dev)
for (i = alt_start; i < niu_num_alt_addr(np); i++) {
err = niu_enable_alt_mac(np, i, 0);
if (err)
- printk(KERN_WARNING PFX "%s: Error %d "
- "disabling alt mac %d\n",
- dev->name, err, i);
+ netdev_warn(dev, "Error %d disabling alt mac %d\n",
+ err, i);
}
}
if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < 16; i++)
hash[i] = 0xffff;
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
for (addr = dev->mc_list; addr; addr = addr->next) {
u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);
@@ -6570,7 +6522,7 @@ static void niu_tx_timeout(struct net_device *dev)
{
struct niu *np = netdev_priv(dev);
- dev_err(np->device, PFX "%s: Transmit timed out, resetting\n",
+ dev_err(np->device, "%s: Transmit timed out, resetting\n",
dev->name);
schedule_work(&np->reset_task);
@@ -6672,8 +6624,7 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
netif_tx_stop_queue(txq);
- dev_err(np->device, PFX "%s: BUG! Tx ring full when "
- "queue awake!\n", dev->name);
+ dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
rp->tx_errors++;
return NETDEV_TX_BUSY;
}
@@ -7237,8 +7188,8 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
tp = &parent->tcam[idx];
if (!tp->valid) {
- pr_info(PFX "niu%d: %s entry [%d] invalid for idx[%d]\n",
- parent->index, np->dev->name, (u16)nfc->fs.location, idx);
+ netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
+ parent->index, (u16)nfc->fs.location, idx);
return -EINVAL;
}
@@ -7248,8 +7199,8 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
ret = niu_class_to_ethflow(class, &fsp->flow_type);
if (ret < 0) {
- pr_info(PFX "niu%d: %s niu_class_to_ethflow failed\n",
- parent->index, np->dev->name);
+ netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
+ parent->index);
ret = -EINVAL;
goto out;
}
@@ -7332,9 +7283,8 @@ static int niu_get_ethtool_tcam_all(struct niu *np,
if (n_entries != cnt) {
/* print warning, this should not happen */
- pr_info(PFX "niu%d: %s In niu_get_ethtool_tcam_all, "
- "n_entries[%d] != cnt[%d]!!!\n\n",
- np->parent->index, np->dev->name, n_entries, cnt);
+ netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n",
+ np->parent->index, __func__, n_entries, cnt);
}
return 0;
@@ -7561,9 +7511,8 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
}
}
if (!add_usr_cls) {
- pr_info(PFX "niu%d: %s niu_add_ethtool_tcam_entry: "
- "Could not find/insert class for pid %d\n",
- parent->index, np->dev->name, uspec->proto);
+ netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
+ parent->index, __func__, uspec->proto);
ret = -EINVAL;
goto out;
}
@@ -7596,9 +7545,8 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
case AH_V6_FLOW:
case ESP_V6_FLOW:
/* Not yet implemented */
- pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
- "flow %d for IPv6 not implemented\n\n",
- parent->index, np->dev->name, fsp->flow_type);
+ netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
+ parent->index, __func__, fsp->flow_type);
ret = -EINVAL;
goto out;
case IP_USER_FLOW:
@@ -7607,17 +7555,15 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
class);
} else {
/* Not yet implemented */
- pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
- "usr flow for IPv6 not implemented\n\n",
- parent->index, np->dev->name);
+ netdev_info(np->dev, "niu%d: In %s(): usr flow for IPv6 not implemented\n",
+ parent->index, __func__);
ret = -EINVAL;
goto out;
}
break;
default:
- pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
- "Unknown flow type %d\n\n",
- parent->index, np->dev->name, fsp->flow_type);
+ netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
+ parent->index, __func__, fsp->flow_type);
ret = -EINVAL;
goto out;
}
@@ -7627,10 +7573,9 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
tp->assoc_data = TCAM_ASSOCDATA_DISC;
} else {
if (fsp->ring_cookie >= np->num_rx_rings) {
- pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
- "Invalid RX ring %lld\n\n",
- parent->index, np->dev->name,
- (long long) fsp->ring_cookie);
+ netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
+ parent->index, __func__,
+ (long long)fsp->ring_cookie);
ret = -EINVAL;
goto out;
}
@@ -7699,10 +7644,9 @@ static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
}
}
if (i == NIU_L3_PROG_CLS) {
- pr_info(PFX "niu%d: %s In niu_del_ethtool_tcam_entry,"
- "Usr class 0x%llx not found \n",
- parent->index, np->dev->name,
- (unsigned long long) class);
+ netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
+ parent->index, __func__,
+ (unsigned long long)class);
ret = -EINVAL;
goto out;
}
@@ -8001,9 +7945,7 @@ static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
* won't get any interrupts and that's painful to debug.
*/
if (nr64(LDG_NUM(ldn)) != ldg) {
- dev_err(np->device, PFX "Port %u, mis-matched "
- "LDG assignment "
- "for ldn %d, should be %d is %llu\n",
+ dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
np->port, ldn, ldg,
(unsigned long long) nr64(LDG_NUM(ldn)));
return -EINVAL;
@@ -8056,7 +7998,7 @@ static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
break;
} while (limit--);
if (!(frame & ESPC_PIO_STAT_READ_END)) {
- dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n",
+ dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
(unsigned long long) frame);
return -ENODEV;
}
@@ -8071,7 +8013,7 @@ static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
break;
} while (limit--);
if (!(frame & ESPC_PIO_STAT_READ_END)) {
- dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n",
+ dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
(unsigned long long) frame);
return -ENODEV;
}
@@ -8152,8 +8094,9 @@ static void __devinit niu_vpd_parse_version(struct niu *np)
s += i + 5;
sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
- niudbg(PROBE, "VPD_SCAN: FCODE major(%d) minor(%d)\n",
- vpd->fcode_major, vpd->fcode_minor);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "VPD_SCAN: FCODE major(%d) minor(%d)\n",
+ vpd->fcode_major, vpd->fcode_minor);
if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
(vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
@@ -8173,8 +8116,8 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
#define FOUND_MASK_PHY 0x00000020
#define FOUND_MASK_ALL 0x0000003f
- niudbg(PROBE, "VPD_SCAN: start[%x] end[%x]\n",
- start, end);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "VPD_SCAN: start[%x] end[%x]\n", start, end);
while (start < end) {
int len, err, instance, type, prop_len;
char namebuf[64];
@@ -8228,8 +8171,7 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
}
if (max_len && prop_len > max_len) {
- dev_err(np->device, PFX "Property '%s' length (%d) is "
- "too long.\n", namebuf, prop_len);
+ dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
return -EINVAL;
}
@@ -8237,8 +8179,9 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
u32 off = start + 5 + err;
int i;
- niudbg(PROBE, "VPD_SCAN: Reading in property [%s] "
- "len[%d]\n", namebuf, prop_len);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "VPD_SCAN: Reading in property [%s] len[%d]\n",
+ namebuf, prop_len);
for (i = 0; i < prop_len; i++)
*prop_buf++ = niu_pci_eeprom_read(np, off + i);
}
@@ -8402,8 +8345,7 @@ static void __devinit niu_pci_vpd_validate(struct niu *np)
u8 val8;
if (!is_valid_ether_addr(&vpd->local_mac[0])) {
- dev_err(np->device, PFX "VPD MAC invalid, "
- "falling back to SPROM.\n");
+ dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
np->flags &= ~NIU_FLAGS_VPD_VALID;
return;
@@ -8420,14 +8362,14 @@ static void __devinit niu_pci_vpd_validate(struct niu *np)
np->flags &= ~NIU_FLAGS_10G;
}
if (np->flags & NIU_FLAGS_10G)
- np->mac_xcvr = MAC_XCVR_XPCS;
+ np->mac_xcvr = MAC_XCVR_XPCS;
} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
NIU_FLAGS_HOTPLUG_PHY);
} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
- dev_err(np->device, PFX "Illegal phy string [%s].\n",
+ dev_err(np->device, "Illegal phy string [%s]\n",
np->vpd.phy_type);
- dev_err(np->device, PFX "Falling back to SPROM.\n");
+ dev_err(np->device, "Falling back to SPROM\n");
np->flags &= ~NIU_FLAGS_VPD_VALID;
return;
}
@@ -8455,7 +8397,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
np->eeprom_len = len;
- niudbg(PROBE, "SPROM: Image size %llu\n", (unsigned long long) val);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: Image size %llu\n", (unsigned long long)val);
sum = 0;
for (i = 0; i < len; i++) {
@@ -8465,10 +8408,10 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
sum += (val >> 16) & 0xff;
sum += (val >> 24) & 0xff;
}
- niudbg(PROBE, "SPROM: Checksum %x\n", (int)(sum & 0xff));
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: Checksum %x\n", (int)(sum & 0xff));
if ((sum & 0xff) != 0xab) {
- dev_err(np->device, PFX "Bad SPROM checksum "
- "(%x, should be 0xab)\n", (int) (sum & 0xff));
+ dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
return -EINVAL;
}
@@ -8491,11 +8434,12 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
ESPC_PHY_TYPE_PORT3_SHIFT;
break;
default:
- dev_err(np->device, PFX "Bogus port number %u\n",
+ dev_err(np->device, "Bogus port number %u\n",
np->port);
return -EINVAL;
}
- niudbg(PROBE, "SPROM: PHY type %x\n", val8);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: PHY type %x\n", val8);
switch (val8) {
case ESPC_PHY_TYPE_1G_COPPER:
@@ -8527,30 +8471,27 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
break;
default:
- dev_err(np->device, PFX "Bogus SPROM phy type %u\n", val8);
+ dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
return -EINVAL;
}
val = nr64(ESPC_MAC_ADDR0);
- niudbg(PROBE, "SPROM: MAC_ADDR0[%08llx]\n",
- (unsigned long long) val);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
dev->perm_addr[0] = (val >> 0) & 0xff;
dev->perm_addr[1] = (val >> 8) & 0xff;
dev->perm_addr[2] = (val >> 16) & 0xff;
dev->perm_addr[3] = (val >> 24) & 0xff;
val = nr64(ESPC_MAC_ADDR1);
- niudbg(PROBE, "SPROM: MAC_ADDR1[%08llx]\n",
- (unsigned long long) val);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
dev->perm_addr[4] = (val >> 0) & 0xff;
dev->perm_addr[5] = (val >> 8) & 0xff;
if (!is_valid_ether_addr(&dev->perm_addr[0])) {
- dev_err(np->device, PFX "SPROM MAC address invalid\n");
- dev_err(np->device, PFX "[ \n");
- for (i = 0; i < 6; i++)
- printk("%02x ", dev->perm_addr[i]);
- printk("]\n");
+ dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
+ dev->perm_addr);
return -EINVAL;
}
@@ -8562,8 +8503,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
val = nr64(ESPC_MOD_STR_LEN);
- niudbg(PROBE, "SPROM: MOD_STR_LEN[%llu]\n",
- (unsigned long long) val);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
if (val >= 8 * 4)
return -EINVAL;
@@ -8578,8 +8519,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
np->vpd.model[val] = '\0';
val = nr64(ESPC_BD_MOD_STR_LEN);
- niudbg(PROBE, "SPROM: BD_MOD_STR_LEN[%llu]\n",
- (unsigned long long) val);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
if (val >= 4 * 4)
return -EINVAL;
@@ -8595,8 +8536,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
np->vpd.mac_num =
nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
- niudbg(PROBE, "SPROM: NUM_PORTS_MACS[%d]\n",
- np->vpd.mac_num);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);
return 0;
}
@@ -8629,8 +8570,6 @@ static int __devinit niu_get_and_validate_port(struct niu *np)
}
}
- niudbg(PROBE, "niu_get_and_validate_port: port[%d] num_ports[%d]\n",
- np->port, parent->num_ports);
if (np->port >= parent->num_ports)
return -ENODEV;
@@ -8659,14 +8598,12 @@ static int __devinit phy_record(struct niu_parent *parent,
pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
parent->index, id,
- (type == PHY_TYPE_PMA_PMD ?
- "PMA/PMD" :
- (type == PHY_TYPE_PCS ?
- "PCS" : "MII")),
+ type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
+ type == PHY_TYPE_PCS ? "PCS" : "MII",
phy_port);
if (p->cur[type] >= NIU_MAX_PORTS) {
- printk(KERN_ERR PFX "Too many PHY ports.\n");
+ pr_err("Too many PHY ports\n");
return -EINVAL;
}
idx = p->cur[type];
@@ -8727,8 +8664,7 @@ static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
parent->rxchan_per_port[i] = (16 / num_ports);
parent->txchan_per_port[i] = (16 / num_ports);
- pr_info(PFX "niu%d: Port %u [%u RX chans] "
- "[%u TX chans]\n",
+ pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
parent->index, i,
parent->rxchan_per_port[i],
parent->txchan_per_port[i]);
@@ -8771,8 +8707,7 @@ static void __devinit niu_divide_channels(struct niu_parent *parent,
parent->rxchan_per_port[i] = rx_chans_per_1g;
parent->txchan_per_port[i] = tx_chans_per_1g;
}
- pr_info(PFX "niu%d: Port %u [%u RX chans] "
- "[%u TX chans]\n",
+ pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
parent->index, i,
parent->rxchan_per_port[i],
parent->txchan_per_port[i]);
@@ -8781,23 +8716,20 @@ static void __devinit niu_divide_channels(struct niu_parent *parent,
}
if (tot_rx > NIU_NUM_RXCHAN) {
- printk(KERN_ERR PFX "niu%d: Too many RX channels (%d), "
- "resetting to one per port.\n",
+ pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
parent->index, tot_rx);
for (i = 0; i < num_ports; i++)
parent->rxchan_per_port[i] = 1;
}
if (tot_tx > NIU_NUM_TXCHAN) {
- printk(KERN_ERR PFX "niu%d: Too many TX channels (%d), "
- "resetting to one per port.\n",
+ pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
parent->index, tot_tx);
for (i = 0; i < num_ports; i++)
parent->txchan_per_port[i] = 1;
}
if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
- printk(KERN_WARNING PFX "niu%d: Driver bug, wasted channels, "
- "RX[%d] TX[%d]\n",
- parent->index, tot_rx, tot_tx);
+ pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
+ parent->index, tot_rx, tot_tx);
}
}
@@ -8825,18 +8757,18 @@ static void __devinit niu_divide_rdc_groups(struct niu_parent *parent,
struct rdc_table *rt = &tp->tables[grp];
int slot;
- pr_info(PFX "niu%d: Port %d RDC tbl(%d) [ ",
+ pr_info("niu%d: Port %d RDC tbl(%d) [ ",
parent->index, i, tp->first_table_num + grp);
for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
rt->rxdma_channel[slot] =
rdc_channel_base + this_channel_offset;
- printk("%d ", rt->rxdma_channel[slot]);
+ pr_cont("%d ", rt->rxdma_channel[slot]);
if (++this_channel_offset == num_channels)
this_channel_offset = 0;
}
- printk("]\n");
+ pr_cont("]\n");
}
parent->rdc_default[i] = rdc_channel_base;
@@ -8996,8 +8928,7 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
break;
default:
- printk(KERN_ERR PFX "Unsupported port config "
- "10G[%d] 1G[%d]\n",
+ pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
num_10g, num_1g);
return -EINVAL;
}
@@ -9015,8 +8946,7 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
return 0;
unknown_vg_1g_port:
- printk(KERN_ERR PFX "Cannot identify platform type, 1gport=%d\n",
- lowest_1g);
+ pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
return -EINVAL;
}
@@ -9025,9 +8955,6 @@ static int __devinit niu_probe_ports(struct niu *np)
struct niu_parent *parent = np->parent;
int err, i;
- niudbg(PROBE, "niu_probe_ports(): port_phy[%08x]\n",
- parent->port_phy);
-
if (parent->port_phy == PORT_PHY_UNKNOWN) {
err = walk_phys(np, parent);
if (err)
@@ -9048,9 +8975,6 @@ static int __devinit niu_classifier_swstate_init(struct niu *np)
{
struct niu_classifier *cp = &np->clas;
- niudbg(PROBE, "niu_classifier_swstate_init: num_tcam(%d)\n",
- np->parent->tcam_num_entries);
-
cp->tcam_top = (u16) np->port;
cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
cp->h1_init = 0xffffffff;
@@ -9116,8 +9040,7 @@ static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
break;
default:
- dev_err(np->device, PFX "Port %u is invalid, cannot "
- "compute MAC block offset.\n", np->port);
+ dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
return -EINVAL;
}
@@ -9327,9 +9250,8 @@ static int __devinit niu_get_of_props(struct niu *np)
phy_type = of_get_property(dp, "phy-type", &prop_len);
if (!phy_type) {
- dev_err(np->device, PFX "%s: OF node lacks "
- "phy-type property\n",
- dp->full_name);
+ netdev_err(dev, "%s: OF node lacks phy-type property\n",
+ dp->full_name);
return -EINVAL;
}
@@ -9339,34 +9261,26 @@ static int __devinit niu_get_of_props(struct niu *np)
strcpy(np->vpd.phy_type, phy_type);
if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
- dev_err(np->device, PFX "%s: Illegal phy string [%s].\n",
- dp->full_name, np->vpd.phy_type);
+ netdev_err(dev, "%s: Illegal phy string [%s]\n",
+ dp->full_name, np->vpd.phy_type);
return -EINVAL;
}
mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
if (!mac_addr) {
- dev_err(np->device, PFX "%s: OF node lacks "
- "local-mac-address property\n",
- dp->full_name);
+ netdev_err(dev, "%s: OF node lacks local-mac-address property\n",
+ dp->full_name);
return -EINVAL;
}
if (prop_len != dev->addr_len) {
- dev_err(np->device, PFX "%s: OF MAC address prop len (%d) "
- "is wrong.\n",
- dp->full_name, prop_len);
+ netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
+ dp->full_name, prop_len);
}
memcpy(dev->perm_addr, mac_addr, dev->addr_len);
if (!is_valid_ether_addr(&dev->perm_addr[0])) {
- int i;
-
- dev_err(np->device, PFX "%s: OF MAC address is invalid\n",
- dp->full_name);
- dev_err(np->device, PFX "%s: [ \n",
- dp->full_name);
- for (i = 0; i < 6; i++)
- printk("%02x ", dev->perm_addr[i]);
- printk("]\n");
+ netdev_err(dev, "%s: OF MAC address is invalid\n",
+ dp->full_name);
+ netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr);
return -EINVAL;
}
@@ -9414,8 +9328,8 @@ static int __devinit niu_get_invariants(struct niu *np)
nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
offset = niu_pci_vpd_offset(np);
- niudbg(PROBE, "niu_get_invariants: VPD offset [%08x]\n",
- offset);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "%s() VPD offset [%08x]\n", __func__, offset);
if (offset)
niu_pci_vpd_fetch(np, offset);
nw64(ESPC_PIO_EN, 0);
@@ -9575,8 +9489,6 @@ static struct niu_parent * __devinit niu_new_parent(struct niu *np,
struct niu_parent *p;
int i;
- niudbg(PROBE, "niu_new_parent: Creating new parent.\n");
-
plat_dev = platform_device_register_simple("niu", niu_parent_index,
NULL, 0);
if (IS_ERR(plat_dev))
@@ -9641,9 +9553,6 @@ static struct niu_parent * __devinit niu_get_parent(struct niu *np,
struct niu_parent *p, *tmp;
int port = np->port;
- niudbg(PROBE, "niu_get_parent: platform_type[%u] port[%u]\n",
- ptype, port);
-
mutex_lock(&niu_parent_lock);
p = NULL;
list_for_each_entry(tmp, &niu_parent_list, list) {
@@ -9681,7 +9590,8 @@ static void niu_put_parent(struct niu *np)
BUG_ON(!p || p->ports[port] != np);
- niudbg(PROBE, "niu_put_parent: port[%u]\n", port);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "%s() port[%u]\n", __func__, port);
sprintf(port_name, "port%d", port);
@@ -9772,7 +9682,7 @@ static struct net_device * __devinit niu_alloc_and_init(
dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
if (!dev) {
- dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n");
+ dev_err(gen_dev, "Etherdev alloc failed, aborting\n");
return NULL;
}
@@ -9858,30 +9768,26 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, PFX "Cannot enable PCI device, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev, PFX "Cannot find proper PCI device "
- "base addresses, aborting.\n");
+ dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
err = -ENODEV;
goto err_out_disable_pdev;
}
err = pci_request_regions(pdev, DRV_MODULE_NAME);
if (err) {
- dev_err(&pdev->dev, PFX "Cannot obtain PCI resources, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
goto err_out_disable_pdev;
}
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (pos <= 0) {
- dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
goto err_out_free_res;
}
@@ -9920,17 +9826,14 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
dev->features |= NETIF_F_HIGHDMA;
err = pci_set_consistent_dma_mask(pdev, dma_mask);
if (err) {
- dev_err(&pdev->dev, PFX "Unable to obtain 44 bit "
- "DMA for consistent allocations, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
goto err_out_release_parent;
}
}
if (err || dma_mask == DMA_BIT_MASK(32)) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, PFX "No usable DMA configuration, "
- "aborting.\n");
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
goto err_out_release_parent;
}
}
@@ -9939,8 +9842,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
np->regs = pci_ioremap_bar(pdev, 0);
if (!np->regs) {
- dev_err(&pdev->dev, PFX "Cannot map device registers, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
err = -ENOMEM;
goto err_out_release_parent;
}
@@ -9955,15 +9857,13 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
err = niu_get_invariants(np);
if (err) {
if (err != -ENODEV)
- dev_err(&pdev->dev, PFX "Problem fetching invariants "
- "of chip, aborting.\n");
+ dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
goto err_out_iounmap;
}
err = register_netdev(dev);
if (err) {
- dev_err(&pdev->dev, PFX "Cannot register net device, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot register net device, aborting\n");
goto err_out_iounmap;
}
@@ -10157,7 +10057,7 @@ static int __devinit niu_of_probe(struct of_device *op,
reg = of_get_property(op->node, "reg", NULL);
if (!reg) {
- dev_err(&op->dev, PFX "%s: No 'reg' property, aborting.\n",
+ dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
op->node->full_name);
return -ENODEV;
}
@@ -10186,8 +10086,7 @@ static int __devinit niu_of_probe(struct of_device *op,
resource_size(&op->resource[1]),
"niu regs");
if (!np->regs) {
- dev_err(&op->dev, PFX "Cannot map device registers, "
- "aborting.\n");
+ dev_err(&op->dev, "Cannot map device registers, aborting\n");
err = -ENOMEM;
goto err_out_release_parent;
}
@@ -10196,8 +10095,7 @@ static int __devinit niu_of_probe(struct of_device *op,
resource_size(&op->resource[2]),
"niu vregs-1");
if (!np->vir_regs_1) {
- dev_err(&op->dev, PFX "Cannot map device vir registers 1, "
- "aborting.\n");
+ dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
err = -ENOMEM;
goto err_out_iounmap;
}
@@ -10206,8 +10104,7 @@ static int __devinit niu_of_probe(struct of_device *op,
resource_size(&op->resource[3]),
"niu vregs-2");
if (!np->vir_regs_2) {
- dev_err(&op->dev, PFX "Cannot map device vir registers 2, "
- "aborting.\n");
+ dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
err = -ENOMEM;
goto err_out_iounmap;
}
@@ -10217,15 +10114,13 @@ static int __devinit niu_of_probe(struct of_device *op,
err = niu_get_invariants(np);
if (err) {
if (err != -ENODEV)
- dev_err(&op->dev, PFX "Problem fetching invariants "
- "of chip, aborting.\n");
+ dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
goto err_out_iounmap;
}
err = register_netdev(dev);
if (err) {
- dev_err(&op->dev, PFX "Cannot register net device, "
- "aborting.\n");
+ dev_err(&op->dev, "Cannot register net device, aborting\n");
goto err_out_iounmap;
}
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 1f6327d41536..8dd509c09bc8 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1719,7 +1719,7 @@ static void ns83820_set_multicast(struct net_device *ndev)
else
and_mask &= ~(RFCR_AAU | RFCR_AAM);
- if (ndev->flags & IFF_ALLMULTI || ndev->mc_count)
+ if (ndev->flags & IFF_ALLMULTI || netdev_mc_count(ndev))
or_mask |= RFCR_AAM;
else
and_mask &= ~RFCR_AAM;
@@ -2292,7 +2292,7 @@ static void __devexit ns83820_remove_one(struct pci_dev *pci_dev)
pci_set_drvdata(pci_dev, NULL);
}
-static struct pci_device_id ns83820_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ns83820_pci_tbl) = {
{ 0x100b, 0x0022, PCI_ANY_ID, PCI_ANY_ID, 0, .driver_data = 0, },
{ 0, },
};
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index 050538bf155a..3a0f910924a5 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -493,8 +493,8 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
}
if (netdev->flags & IFF_MULTICAST) {
- if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI)
- || netdev->mc_count > available_cam_entries)
+ if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(netdev) > available_cam_entries)
multicast_mode = 2; /* 1 - Accept all multicast. */
else
multicast_mode = 0; /* 0 - Use CAM. */
@@ -511,7 +511,7 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
}
}
if (multicast_mode == 0) {
- i = netdev->mc_count;
+ i = netdev_mc_count(netdev);
list = netdev->mc_list;
while (i--) {
octeon_mgmt_cam_state_add(&cam_state, list->da_addr);
@@ -1119,11 +1119,8 @@ static int __init octeon_mgmt_probe(struct platform_device *pdev)
if (p->port >= octeon_bootinfo->mac_addr_count)
dev_err(&pdev->dev,
- "Error %s: Using MAC outside of the assigned range: "
- "%02x:%02x:%02x:%02x:%02x:%02x\n", netdev->name,
- netdev->dev_addr[0], netdev->dev_addr[1],
- netdev->dev_addr[2], netdev->dev_addr[3],
- netdev->dev_addr[4], netdev->dev_addr[5]);
+ "Error %s: Using MAC outside of the assigned range: %pM\n",
+ netdev->name, netdev->dev_addr);
if (register_netdev(netdev))
goto err;
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 1673eb045e1e..d44d4a208bbf 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -1875,7 +1875,7 @@ static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
free_netdev(netdev);
}
-static struct pci_device_id pasemi_mac_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pasemi_mac_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
{ },
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 480af402affd..bbdf0398c93f 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -211,7 +211,7 @@ static struct {
};
-static struct pci_device_id netdrv_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(netdrv_pci_tbl) = {
{0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NETDRV_CB },
{0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMC1211TX },
@@ -1820,7 +1820,7 @@ static void netdrv_set_rx_mode (struct net_device *dev)
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
@@ -1829,7 +1829,7 @@ static void netdrv_set_rx_mode (struct net_device *dev)
struct dev_mc_list *mclist;
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 98938ea9e0bd..3d1d3a7b7ed3 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -1148,7 +1148,7 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
ioaddr + EL3_CMD);
- else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
+ else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
else
outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 322e11df0097..091e0b00043e 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -886,7 +886,7 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
opts |= RxMulticast | RxProm;
- else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
+ else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
opts |= RxMulticast;
outw(opts, ioaddr + EL3_CMD);
}
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index d431b59e7d11..2ee57bd52a01 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1065,14 +1065,11 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&ei_local->page_lock, flags);
outb_p(0x00, e8390_base + EN0_IMR);
- spin_unlock_irqrestore(&ei_local->page_lock, flags);
/*
* Slow phase with lock held.
*/
- spin_lock_irqsave(&ei_local->page_lock, flags);
-
ei_local->irqlock = 1;
send_length = max(length, ETH_ZLEN);
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 7b17404d0858..3d573ed5f7c5 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -1187,19 +1187,19 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
memset(mc_filter, 0xff, sizeof(mc_filter));
outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
- } else if (dev->mc_count > MC_FILTERBREAK ||
+ } else if (netdev_mc_count(dev) > MC_FILTERBREAK ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
outb(2, ioaddr + RX_MODE); /* Use normal mode. */
- } else if (dev->mc_count == 0) {
+ } else if (netdev_mc_empty(dev)) {
memset(mc_filter, 0x00, sizeof(mc_filter));
outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
} else {
struct dev_mc_list *mclist;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
unsigned int bit =
ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 12e3233868e9..c42a31a97fa3 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1481,8 +1481,8 @@ static void set_multicast_list(struct net_device *dev)
#ifdef PCMCIA_DEBUG
{
static int old;
- if (dev->mc_count != old) {
- old = dev->mc_count;
+ if (netdev_mc_count(dev) != old) {
+ old = netdev_mc_count(dev);
pr_debug("%s: setting Rx mode to %d addresses.\n",
dev->name, old);
}
@@ -1490,13 +1490,13 @@ static void set_multicast_list(struct net_device *dev)
#endif
/* Set multicast_num_addrs. */
- lp->multicast_num_addrs = dev->mc_count;
+ lp->multicast_num_addrs = netdev_mc_count(dev);
/* Set multicast_ladrf. */
if (num_addrs > 0) {
/* Calculate multicast logical address filter */
memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
memcpy(adr, dmi->dmi_addr, ETHER_ADDR_LEN);
dmi = dmi->next;
BuildLAF(lp->multicast_ladrf, adr);
@@ -1537,15 +1537,15 @@ static void set_multicast_list(struct net_device *dev)
#ifdef PCMCIA_DEBUG
{
static int old;
- if (dev->mc_count != old) {
- old = dev->mc_count;
+ if (netdev_mc_count(dev) != old) {
+ old = netdev_mc_count(dev);
pr_debug("%s: setting Rx mode to %d addresses.\n",
dev->name, old);
}
}
#endif
- lp->multicast_num_addrs = dev->mc_count;
+ lp->multicast_num_addrs = netdev_mc_count(dev);
restore_multicast_list(dev);
} /* set_multicast_list */
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 6dd486d2977b..d2e86b8887c8 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1638,8 +1638,8 @@ static void set_rx_mode(struct net_device *dev)
} else if (dev->flags & IFF_ALLMULTI)
rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti;
else {
- if (dev->mc_count) {
- fill_multicast_tbl(dev->mc_count, dev->mc_list,
+ if (!netdev_mc_empty(dev)) {
+ fill_multicast_tbl(netdev_mc_count(dev), dev->mc_list,
(u_char *)multicast_table);
}
rx_cfg_setting = RxStripCRC | RxEnable;
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 466fc72698c0..4ace18a71152 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -1384,7 +1384,7 @@ set_addresses(struct net_device *dev)
if (++n > 9)
break;
i = 0;
- if (n > 1 && n <= dev->mc_count && dmi) {
+ if (n > 1 && n <= netdev_mc_count(dev) && dmi) {
dmi = dmi->next;
}
}
@@ -1394,7 +1394,7 @@ set_addresses(struct net_device *dev)
SelectPage(k);
}
- if (n && n <= dev->mc_count && dmi)
+ if (n && n <= netdev_mc_count(dev) && dmi)
addr = dmi->dmi_addr;
else
addr = dev->dev_addr;
@@ -1424,9 +1424,9 @@ set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* snoop */
PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */
- } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) {
+ } else if (netdev_mc_count(dev) > 9 || (dev->flags & IFF_ALLMULTI)) {
PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
/* the chip can filter 9 addresses perfectly */
PutByte(XIRCREG42_SWC1, value | 0x01);
SelectPage(0x40);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index e154677ff706..63e03159daf7 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -21,6 +21,8 @@
*
*************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "pcnet32"
#define DRV_VERSION "1.35"
#define DRV_RELDATE "21.Apr.2008"
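
The pr_fmt() override added at the top of pcnet32.c makes every pr_*() call in the file prefix its message with the module name, replacing the hand-rolled PFX prefix used further down. Roughly (a sketch of the usual <linux/printk.h> expansion, not its verbatim text):

/* With the override in place, pr_fmt(fmt) is "pcnet32: " fmt, and */
#define pr_err(fmt, ...) printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

/* so a call such as
 *	pr_err("failed to enable device -- err=%d\n", err);
 * prints
 *	pcnet32: failed to enable device -- err=-22
 * (example error value only).
 */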
@@ -50,16 +52,16 @@ static const char *const version =
#include <linux/spinlock.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
#include <asm/irq.h>
/*
* PCI device identifiers for "new style" Linux PCI Device Drivers
*/
-static struct pci_device_id pcnet32_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
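
DEFINE_PCI_DEVICE_TABLE makes the ID table const and annotates it for the PCI core; its shape is approximately the following (an approximation, not the exact <linux/pci.h> definition of this era):

/* Approximate shape of the macro used above. */
#define DEFINE_PCI_DEVICE_TABLE(_table) \
	const struct pci_device_id _table[] __devinitconst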
@@ -83,7 +85,7 @@ static int cards_found;
static unsigned int pcnet32_portlist[] __initdata =
{ 0x300, 0x320, 0x340, 0x360, 0 };
-static int pcnet32_debug = 0;
+static int pcnet32_debug;
static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
static int pcnet32vlb; /* check for VLB cards ? */
@@ -390,7 +392,7 @@ static struct pcnet32_access pcnet32_wio = {
static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
{
outl(index, addr + PCNET32_DWIO_RAP);
- return (inl(addr + PCNET32_DWIO_RDP) & 0xffff);
+ return inl(addr + PCNET32_DWIO_RDP) & 0xffff;
}
static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
@@ -402,7 +404,7 @@ static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
{
outl(index, addr + PCNET32_DWIO_RAP);
- return (inl(addr + PCNET32_DWIO_BDP) & 0xffff);
+ return inl(addr + PCNET32_DWIO_BDP) & 0xffff;
}
static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
@@ -413,7 +415,7 @@ static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
static u16 pcnet32_dwio_read_rap(unsigned long addr)
{
- return (inl(addr + PCNET32_DWIO_RAP) & 0xffff);
+ return inl(addr + PCNET32_DWIO_RAP) & 0xffff;
}
static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
@@ -487,10 +489,7 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
(1 << size),
&new_ring_dma_addr);
if (new_tx_ring == NULL) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Consistent memory allocation failed.\n",
- dev->name);
+ netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
return;
}
memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
@@ -498,18 +497,14 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
GFP_ATOMIC);
if (!new_dma_addr_list) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Memory allocation failed.\n", dev->name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
goto free_new_tx_ring;
}
new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
GFP_ATOMIC);
if (!new_skb_list) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Memory allocation failed.\n", dev->name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
goto free_new_lists;
}
@@ -529,15 +524,14 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
lp->tx_skbuff = new_skb_list;
return;
- free_new_lists:
+free_new_lists:
kfree(new_dma_addr_list);
- free_new_tx_ring:
+free_new_tx_ring:
pci_free_consistent(lp->pci_dev,
sizeof(struct pcnet32_tx_head) *
(1 << size),
new_tx_ring,
new_ring_dma_addr);
- return;
}
/*
@@ -565,10 +559,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
(1 << size),
&new_ring_dma_addr);
if (new_rx_ring == NULL) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Consistent memory allocation failed.\n",
- dev->name);
+ netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
return;
}
memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
@@ -576,18 +567,14 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
GFP_ATOMIC);
if (!new_dma_addr_list) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Memory allocation failed.\n", dev->name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
goto free_new_rx_ring;
}
new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
GFP_ATOMIC);
if (!new_skb_list) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Memory allocation failed.\n", dev->name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
goto free_new_lists;
}
@@ -599,15 +586,14 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
new_skb_list[new] = lp->rx_skbuff[new];
}
/* now allocate any new buffers needed */
- for (; new < size; new++ ) {
+ for (; new < size; new++) {
struct sk_buff *rx_skbuff;
new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB);
- if (!(rx_skbuff = new_skb_list[new])) {
+ rx_skbuff = new_skb_list[new];
+ if (!rx_skbuff) {
/* keep the original lists and buffers */
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: pcnet32_realloc_rx_ring dev_alloc_skb failed.\n",
- dev->name);
+ netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n",
+ __func__);
goto free_all_new;
}
skb_reserve(rx_skbuff, NET_IP_ALIGN);
@@ -644,8 +630,8 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
lp->rx_skbuff = new_skb_list;
return;
- free_all_new:
- for (; --new >= lp->rx_ring_size; ) {
+free_all_new:
+ while (--new >= lp->rx_ring_size) {
if (new_skb_list[new]) {
pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
@@ -653,9 +639,9 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
}
}
kfree(new_skb_list);
- free_new_lists:
+free_new_lists:
kfree(new_dma_addr_list);
- free_new_rx_ring:
+free_new_rx_ring:
pci_free_consistent(lp->pci_dev,
sizeof(struct pcnet32_rx_head) *
(1 << size),
@@ -838,16 +824,14 @@ static int pcnet32_set_ringparam(struct net_device *dev,
spin_unlock_irqrestore(&lp->lock, flags);
- if (netif_msg_drv(lp))
- printk(KERN_INFO
- "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name,
- lp->rx_ring_size, lp->tx_ring_size);
+ netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
+ lp->rx_ring_size, lp->tx_ring_size);
return 0;
}
static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
- u8 * data)
+ u8 *data)
{
memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
}
@@ -871,17 +855,15 @@ static void pcnet32_ethtool_test(struct net_device *dev,
if (test->flags == ETH_TEST_FL_OFFLINE) {
rc = pcnet32_loopback_test(dev, data);
if (rc) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG "%s: Loopback test failed.\n",
- dev->name);
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Loopback test failed\n");
test->flags |= ETH_TEST_FL_FAILED;
- } else if (netif_msg_hw(lp))
- printk(KERN_DEBUG "%s: Loopback test passed.\n",
- dev->name);
- } else if (netif_msg_hw(lp))
- printk(KERN_DEBUG
- "%s: No tests to run (specify 'Offline' on ethtool).",
- dev->name);
+ } else
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Loopback test passed\n");
+ } else
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "No tests to run (specify 'Offline' on ethtool)\n");
} /* end pcnet32_ethtool_test */
static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
@@ -926,40 +908,39 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
/* Initialize Transmit buffers. */
size = data_len + 15;
for (x = 0; x < numbuffs; x++) {
- if (!(skb = dev_alloc_skb(size))) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG
- "%s: Cannot allocate skb at line: %d!\n",
- dev->name, __LINE__);
+ skb = dev_alloc_skb(size);
+ if (!skb) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Cannot allocate skb at line: %d!\n",
+ __LINE__);
goto clean_up;
- } else {
- packet = skb->data;
- skb_put(skb, size); /* create space for data */
- lp->tx_skbuff[x] = skb;
- lp->tx_ring[x].length = cpu_to_le16(-skb->len);
- lp->tx_ring[x].misc = 0;
-
- /* put DA and SA into the skb */
- for (i = 0; i < 6; i++)
- *packet++ = dev->dev_addr[i];
- for (i = 0; i < 6; i++)
- *packet++ = dev->dev_addr[i];
- /* type */
- *packet++ = 0x08;
- *packet++ = 0x06;
- /* packet number */
- *packet++ = x;
- /* fill packet with data */
- for (i = 0; i < data_len; i++)
- *packet++ = i;
-
- lp->tx_dma_addr[x] =
- pci_map_single(lp->pci_dev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
- wmb(); /* Make sure owner changes after all others are visible */
- lp->tx_ring[x].status = cpu_to_le16(status);
}
+ packet = skb->data;
+ skb_put(skb, size); /* create space for data */
+ lp->tx_skbuff[x] = skb;
+ lp->tx_ring[x].length = cpu_to_le16(-skb->len);
+ lp->tx_ring[x].misc = 0;
+
+ /* put DA and SA into the skb */
+ for (i = 0; i < 6; i++)
+ *packet++ = dev->dev_addr[i];
+ for (i = 0; i < 6; i++)
+ *packet++ = dev->dev_addr[i];
+ /* type */
+ *packet++ = 0x08;
+ *packet++ = 0x06;
+ /* packet number */
+ *packet++ = x;
+ /* fill packet with data */
+ for (i = 0; i < data_len; i++)
+ *packet++ = i;
+
+ lp->tx_dma_addr[x] =
+ pci_map_single(lp->pci_dev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->tx_ring[x].status = cpu_to_le16(status);
}
x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */
@@ -984,9 +965,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
ticks++;
}
if (ticks == 200) {
- if (netif_msg_hw(lp))
- printk("%s: Desc %d failed to reset!\n",
- dev->name, x);
+ netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x);
break;
}
}
@@ -994,15 +973,14 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
wmb();
if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
- printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
+ netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
for (x = 0; x < numbuffs; x++) {
- printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x);
+ netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x);
skb = lp->rx_skbuff[x];
- for (i = 0; i < size; i++) {
- printk("%02x ", *(skb->data + i));
- }
- printk("\n");
+ for (i = 0; i < size; i++)
+ pr_cont(" %02x", *(skb->data + i));
+ pr_cont("\n");
}
}
@@ -1013,11 +991,9 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
packet = lp->tx_skbuff[x]->data;
for (i = 0; i < size; i++) {
if (*(skb->data + i) != packet[i]) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG
- "%s: Error in compare! %2x - %02x %02x\n",
- dev->name, i, *(skb->data + i),
- packet[i]);
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Error in compare! %2x - %02x %02x\n",
+ i, *(skb->data + i), packet[i]);
rc = 1;
break;
}
@@ -1025,7 +1001,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
x++;
}
- clean_up:
+clean_up:
*data1 = rc;
pcnet32_purge_tx_ring(dev);
@@ -1044,7 +1020,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
}
spin_unlock_irqrestore(&lp->lock, flags);
- return (rc);
+ return rc;
} /* end pcnet32_loopback_test */
static void pcnet32_led_blink_callback(struct net_device *dev)
@@ -1056,9 +1032,8 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
int i;
spin_lock_irqsave(&lp->lock, flags);
- for (i = 4; i < 8; i++) {
+ for (i = 4; i < 8; i++)
a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
- }
spin_unlock_irqrestore(&lp->lock, flags);
mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT);
@@ -1080,9 +1055,8 @@ static int pcnet32_phys_id(struct net_device *dev, u32 data)
/* Save the current value of the bcrs */
spin_lock_irqsave(&lp->lock, flags);
- for (i = 4; i < 8; i++) {
+ for (i = 4; i < 8; i++)
regs[i - 4] = a->read_bcr(ioaddr, i);
- }
spin_unlock_irqrestore(&lp->lock, flags);
mod_timer(&lp->blink_timer, jiffies);
@@ -1097,9 +1071,8 @@ static int pcnet32_phys_id(struct net_device *dev, u32 data)
/* Restore the original value of the bcrs */
spin_lock_irqsave(&lp->lock, flags);
- for (i = 4; i < 8; i++) {
+ for (i = 4; i < 8; i++)
a->write_bcr(ioaddr, i, regs[i - 4]);
- }
spin_unlock_irqrestore(&lp->lock, flags);
return 0;
@@ -1136,10 +1109,8 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
spin_lock_irqsave(&lp->lock, *flags);
ticks++;
if (ticks > 200) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG
- "%s: Error getting into suspend!\n",
- dev->name);
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Error getting into suspend!\n");
return 0;
}
}
@@ -1184,15 +1155,13 @@ static void pcnet32_rx_entry(struct net_device *dev,
/* Discard oversize frames. */
if (unlikely(pkt_len > PKT_BUF_SIZE)) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR "%s: Impossible packet size %d!\n",
- dev->name, pkt_len);
+ netif_err(lp, drv, dev, "Impossible packet size %d!\n",
+ pkt_len);
dev->stats.rx_errors++;
return;
}
if (pkt_len < 60) {
- if (netif_msg_rx_err(lp))
- printk(KERN_ERR "%s: Runt packet!\n", dev->name);
+ netif_err(lp, rx_err, dev, "Runt packet!\n");
dev->stats.rx_errors++;
return;
}
@@ -1200,7 +1169,8 @@ static void pcnet32_rx_entry(struct net_device *dev,
if (pkt_len > rx_copybreak) {
struct sk_buff *newskb;
- if ((newskb = dev_alloc_skb(PKT_BUF_SKB))) {
+ newskb = dev_alloc_skb(PKT_BUF_SKB);
+ if (newskb) {
skb_reserve(newskb, NET_IP_ALIGN);
skb = lp->rx_skbuff[entry];
pci_unmap_single(lp->pci_dev,
@@ -1218,15 +1188,11 @@ static void pcnet32_rx_entry(struct net_device *dev,
rx_in_place = 1;
} else
skb = NULL;
- } else {
+ } else
skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
- }
if (skb == NULL) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Memory squeeze, dropping packet.\n",
- dev->name);
+ netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n");
dev->stats.rx_dropped++;
return;
}
@@ -1297,11 +1263,9 @@ static int pcnet32_tx(struct net_device *dev)
/* There was a major error, log it. */
int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
dev->stats.tx_errors++;
- if (netif_msg_tx_err(lp))
- printk(KERN_ERR
- "%s: Tx error status=%04x err_status=%08x\n",
- dev->name, status,
- err_status);
+ netif_err(lp, tx_err, dev,
+ "Tx error status=%04x err_status=%08x\n",
+ status, err_status);
if (err_status & 0x04000000)
dev->stats.tx_aborted_errors++;
if (err_status & 0x08000000)
@@ -1313,10 +1277,7 @@ static int pcnet32_tx(struct net_device *dev)
dev->stats.tx_fifo_errors++;
/* Ackk! On FIFO errors the Tx unit is turned off! */
/* Remove this verbosity later! */
- if (netif_msg_tx_err(lp))
- printk(KERN_ERR
- "%s: Tx FIFO error!\n",
- dev->name);
+ netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
must_restart = 1;
}
#else
@@ -1325,10 +1286,7 @@ static int pcnet32_tx(struct net_device *dev)
if (!lp->dxsuflo) { /* If controller doesn't recover ... */
/* Ackk! On FIFO errors the Tx unit is turned off! */
/* Remove this verbosity later! */
- if (netif_msg_tx_err(lp))
- printk(KERN_ERR
- "%s: Tx FIFO error!\n",
- dev->name);
+ netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
must_restart = 1;
}
}
@@ -1354,11 +1312,8 @@ static int pcnet32_tx(struct net_device *dev)
delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
if (delta > lp->tx_ring_size) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
- dev->name, dirty_tx, lp->cur_tx,
- lp->tx_full);
+ netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
+ dirty_tx, lp->cur_tx, lp->tx_full);
dirty_tx += lp->tx_ring_size;
delta -= lp->tx_ring_size;
}
@@ -1421,7 +1376,7 @@ static int pcnet32_get_regs_len(struct net_device *dev)
struct pcnet32_private *lp = netdev_priv(dev);
int j = lp->phycount * PCNET32_REGS_PER_PHY;
- return ((PCNET32_NUM_REGS + j) * sizeof(u16));
+ return (PCNET32_NUM_REGS + j) * sizeof(u16);
}
static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
@@ -1445,21 +1400,20 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
*buff++ = inw(ioaddr + i);
/* read control and status registers */
- for (i = 0; i < 90; i++) {
+ for (i = 0; i < 90; i++)
*buff++ = a->read_csr(ioaddr, i);
- }
*buff++ = a->read_csr(ioaddr, 112);
*buff++ = a->read_csr(ioaddr, 114);
/* read bus configuration registers */
- for (i = 0; i < 30; i++) {
+ for (i = 0; i < 30; i++)
*buff++ = a->read_bcr(ioaddr, i);
- }
+
*buff++ = 0; /* skip bcr30 so as not to hang 79C976 */
- for (i = 31; i < 36; i++) {
+
+ for (i = 31; i < 36; i++)
*buff++ = a->read_bcr(ioaddr, i);
- }
/* read mii phy registers */
if (lp->mii) {
@@ -1535,8 +1489,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pci_enable_device(pdev);
if (err < 0) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX
- "failed to enable device -- err=%d\n", err);
+ pr_err("failed to enable device -- err=%d\n", err);
return err;
}
pci_set_master(pdev);
@@ -1544,29 +1497,25 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
ioaddr = pci_resource_start(pdev, 0);
if (!ioaddr) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX
- "card has no PCI IO resources, aborting\n");
+ pr_err("card has no PCI IO resources, aborting\n");
return -ENODEV;
}
if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX
- "architecture does not support 32bit PCI busmaster DMA\n");
+ pr_err("architecture does not support 32bit PCI busmaster DMA\n");
return -ENODEV;
}
- if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") ==
- NULL) {
+ if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX
- "io address range already allocated\n");
+ pr_err("io address range already allocated\n");
return -EBUSY;
}
err = pcnet32_probe1(ioaddr, 1, pdev);
- if (err < 0) {
+ if (err < 0)
pci_disable_device(pdev);
- }
+
return err;
}
@@ -1616,7 +1565,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
a = &pcnet32_dwio;
} else {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "No access methods\n");
+ pr_err("No access methods\n");
goto err_release_region;
}
}
@@ -1624,11 +1573,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
chip_version =
a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
- printk(KERN_INFO " PCnet chip version is %#x.\n",
- chip_version);
+ pr_info(" PCnet chip version is %#x\n", chip_version);
if ((chip_version & 0xfff) != 0x003) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO PFX "Unsupported chip version.\n");
+ pr_info("Unsupported chip version\n");
goto err_release_region;
}
@@ -1681,7 +1629,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
if (cards_found < MAX_UNITS && homepna[cards_found])
media |= 1; /* switch to home wiring mode */
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_DEBUG PFX "media set to %sMbit mode.\n",
+ printk(KERN_DEBUG PFX "media set to %sMbit mode\n",
(media & 1) ? "1" : "10");
a->write_bcr(ioaddr, 49, media);
break;
@@ -1697,9 +1645,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
break;
default:
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO PFX
- "PCnet version %#x, no PCnet32 chip.\n",
- chip_version);
+ pr_info("PCnet version %#x, no PCnet32 chip\n",
+ chip_version);
goto err_release_region;
}
@@ -1721,7 +1668,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
dev = alloc_etherdev(sizeof(*lp));
if (!dev) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "Memory allocation failed.\n");
+ pr_err("Memory allocation failed\n");
ret = -ENOMEM;
goto err_release_region;
}
@@ -1730,7 +1677,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr);
+ pr_info("%s at %#3lx,", chipname, ioaddr);
/* In most chips, after a chip reset, the ethernet address is read from the
* station address PROM at the base address and programmed into the
@@ -1755,9 +1702,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
!is_valid_ether_addr(dev->dev_addr)) {
if (is_valid_ether_addr(promaddr)) {
if (pcnet32_debug & NETIF_MSG_PROBE) {
- printk(" warning: CSR address invalid,\n");
- printk(KERN_INFO
- " using instead PROM address of");
+ pr_cont(" warning: CSR address invalid,\n");
+ pr_info(" using instead PROM address of");
}
memcpy(dev->dev_addr, promaddr, 6);
}
@@ -1769,54 +1715,54 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
memset(dev->dev_addr, 0, ETH_ALEN);
if (pcnet32_debug & NETIF_MSG_PROBE) {
- printk(" %pM", dev->dev_addr);
+ pr_cont(" %pM", dev->dev_addr);
/* Version 0x2623 and 0x2624 */
if (((chip_version + 1) & 0xfffe) == 0x2624) {
i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
- printk(KERN_INFO " tx_start_pt(0x%04x):", i);
+ pr_info(" tx_start_pt(0x%04x):", i);
switch (i >> 10) {
case 0:
- printk(KERN_CONT " 20 bytes,");
+ pr_cont(" 20 bytes,");
break;
case 1:
- printk(KERN_CONT " 64 bytes,");
+ pr_cont(" 64 bytes,");
break;
case 2:
- printk(KERN_CONT " 128 bytes,");
+ pr_cont(" 128 bytes,");
break;
case 3:
- printk(KERN_CONT "~220 bytes,");
+ pr_cont("~220 bytes,");
break;
}
i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
- printk(KERN_CONT " BCR18(%x):", i & 0xffff);
+ pr_cont(" BCR18(%x):", i & 0xffff);
if (i & (1 << 5))
- printk(KERN_CONT "BurstWrEn ");
+ pr_cont("BurstWrEn ");
if (i & (1 << 6))
- printk(KERN_CONT "BurstRdEn ");
+ pr_cont("BurstRdEn ");
if (i & (1 << 7))
- printk(KERN_CONT "DWordIO ");
+ pr_cont("DWordIO ");
if (i & (1 << 11))
- printk(KERN_CONT "NoUFlow ");
+ pr_cont("NoUFlow ");
i = a->read_bcr(ioaddr, 25);
- printk(KERN_INFO " SRAMSIZE=0x%04x,", i << 8);
+ pr_info(" SRAMSIZE=0x%04x,", i << 8);
i = a->read_bcr(ioaddr, 26);
- printk(KERN_CONT " SRAM_BND=0x%04x,", i << 8);
+ pr_cont(" SRAM_BND=0x%04x,", i << 8);
i = a->read_bcr(ioaddr, 27);
if (i & (1 << 14))
- printk(KERN_CONT "LowLatRx");
+ pr_cont("LowLatRx");
}
}
dev->base_addr = ioaddr;
lp = netdev_priv(dev);
/* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
- if ((lp->init_block =
- pci_alloc_consistent(pdev, sizeof(*lp->init_block), &lp->init_dma_addr)) == NULL) {
+ lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block),
+ &lp->init_dma_addr);
+ if (!lp->init_block) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX
- "Consistent memory allocation failed.\n");
+ pr_err("Consistent memory allocation failed\n");
ret = -ENOMEM;
goto err_free_netdev;
}
@@ -1890,7 +1836,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
if (pdev) { /* use the IRQ provided by PCI */
dev->irq = pdev->irq;
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(" assigned IRQ %d.\n", dev->irq);
+ pr_cont(" assigned IRQ %d\n", dev->irq);
} else {
unsigned long irq_mask = probe_irq_on();
@@ -1906,12 +1852,12 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
dev->irq = probe_irq_off(irq_mask);
if (!dev->irq) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(", failed to detect IRQ line.\n");
+ pr_cont(", failed to detect IRQ line\n");
ret = -ENODEV;
goto err_free_ring;
}
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(", probed IRQ %d.\n", dev->irq);
+ pr_cont(", probed IRQ %d\n", dev->irq);
}
/* Set the mii phy_id so that we can query the link state */
@@ -1935,14 +1881,12 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
lp->phymask |= (1 << i);
lp->mii_if.phy_id = i;
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO PFX
- "Found PHY %04x:%04x at address %d.\n",
- id1, id2, i);
+ pr_info("Found PHY %04x:%04x at address %d\n",
+ id1, id2, i);
}
lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
- if (lp->phycount > 1) {
+ if (lp->phycount > 1)
lp->options |= PCNET32_PORT_MII;
- }
}
init_timer(&lp->watchdog_timer);
@@ -1966,7 +1910,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
}
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name);
+ pr_info("%s: registered as %s\n", dev->name, lp->name);
cards_found++;
/* enable LED writes */
@@ -1995,10 +1939,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->tx_ring_size,
&lp->tx_ring_dma_addr);
if (lp->tx_ring == NULL) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR PFX
- "%s: Consistent memory allocation failed.\n",
- name);
+ netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
return -ENOMEM;
}
@@ -2007,46 +1948,35 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->rx_ring_size,
&lp->rx_ring_dma_addr);
if (lp->rx_ring == NULL) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR PFX
- "%s: Consistent memory allocation failed.\n",
- name);
+ netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
return -ENOMEM;
}
lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
GFP_ATOMIC);
if (!lp->tx_dma_addr) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR PFX
- "%s: Memory allocation failed.\n", name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
return -ENOMEM;
}
lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
GFP_ATOMIC);
if (!lp->rx_dma_addr) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR PFX
- "%s: Memory allocation failed.\n", name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
return -ENOMEM;
}
lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
GFP_ATOMIC);
if (!lp->tx_skbuff) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR PFX
- "%s: Memory allocation failed.\n", name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
return -ENOMEM;
}
lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
GFP_ATOMIC);
if (!lp->rx_skbuff) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR PFX
- "%s: Memory allocation failed.\n", name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
return -ENOMEM;
}
@@ -2115,12 +2045,11 @@ static int pcnet32_open(struct net_device *dev)
/* switch pcnet32 to 32bit mode */
lp->a.write_bcr(ioaddr, 20, 2);
- if (netif_msg_ifup(lp))
- printk(KERN_DEBUG
- "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
- dev->name, dev->irq, (u32) (lp->tx_ring_dma_addr),
- (u32) (lp->rx_ring_dma_addr),
- (u32) (lp->init_dma_addr));
+ netif_printk(lp, ifup, KERN_DEBUG, dev,
+ "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
+ __func__, dev->irq, (u32) (lp->tx_ring_dma_addr),
+ (u32) (lp->rx_ring_dma_addr),
+ (u32) (lp->init_dma_addr));
/* set/reset autoselect bit */
val = lp->a.read_bcr(ioaddr, 2) & ~2;
@@ -2155,10 +2084,8 @@ static int pcnet32_open(struct net_device *dev)
pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
if (lp->options & PCNET32_PORT_ASEL) {
lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
- if (netif_msg_link(lp))
- printk(KERN_DEBUG
- "%s: Setting 100Mb-Full Duplex.\n",
- dev->name);
+ netif_printk(lp, link, KERN_DEBUG, dev,
+ "Setting 100Mb-Full Duplex\n");
}
}
if (lp->phycount < 2) {
@@ -2246,9 +2173,7 @@ static int pcnet32_open(struct net_device *dev)
}
}
lp->mii_if.phy_id = first_phy;
- if (netif_msg_link(lp))
- printk(KERN_INFO "%s: Using PHY number %d.\n",
- dev->name, first_phy);
+ netif_info(lp, link, dev, "Using PHY number %d\n", first_phy);
}
#ifdef DO_DXSUFLO
@@ -2295,18 +2220,17 @@ static int pcnet32_open(struct net_device *dev)
*/
lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
- if (netif_msg_ifup(lp))
- printk(KERN_DEBUG
- "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
- dev->name, i,
- (u32) (lp->init_dma_addr),
- lp->a.read_csr(ioaddr, CSR0));
+ netif_printk(lp, ifup, KERN_DEBUG, dev,
+ "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
+ i,
+ (u32) (lp->init_dma_addr),
+ lp->a.read_csr(ioaddr, CSR0));
spin_unlock_irqrestore(&lp->lock, flags);
return 0; /* Always succeed */
- err_free_ring:
+err_free_ring:
/* free any allocated skbuffs */
pcnet32_purge_rx_ring(dev);
@@ -2316,7 +2240,7 @@ static int pcnet32_open(struct net_device *dev)
*/
lp->a.write_bcr(ioaddr, 20, 4);
- err_free_irq:
+err_free_irq:
spin_unlock_irqrestore(&lp->lock, flags);
free_irq(dev->irq, dev);
return rc;
@@ -2367,14 +2291,12 @@ static int pcnet32_init_ring(struct net_device *dev)
for (i = 0; i < lp->rx_ring_size; i++) {
struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
if (rx_skbuff == NULL) {
- if (!
- (rx_skbuff = lp->rx_skbuff[i] =
- dev_alloc_skb(PKT_BUF_SKB))) {
- /* there is not much, we can do at this point */
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: pcnet32_init_ring dev_alloc_skb failed.\n",
- dev->name);
+ lp->rx_skbuff[i] = dev_alloc_skb(PKT_BUF_SKB);
+ rx_skbuff = lp->rx_skbuff[i];
+ if (!rx_skbuff) {
+ /* there is not much we can do at this point */
+ netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n",
+ __func__);
return -1;
}
skb_reserve(rx_skbuff, NET_IP_ALIGN);
@@ -2424,10 +2346,9 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
break;
- if (i >= 100 && netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: pcnet32_restart timed out waiting for stop.\n",
- dev->name);
+ if (i >= 100)
+ netif_err(lp, drv, dev, "%s timed out waiting for stop\n",
+ __func__);
pcnet32_purge_tx_ring(dev);
if (pcnet32_init_ring(dev))
@@ -2451,8 +2372,7 @@ static void pcnet32_tx_timeout(struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
/* Transmitter timeout, serious problems. */
if (pcnet32_debug & NETIF_MSG_DRV)
- printk(KERN_ERR
- "%s: transmit timed out, status %4.4x, resetting.\n",
+ pr_err("%s: transmit timed out, status %4.4x, resetting\n",
dev->name, lp->a.read_csr(ioaddr, CSR0));
lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
dev->stats.tx_errors++;
@@ -2495,11 +2415,9 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&lp->lock, flags);
- if (netif_msg_tx_queued(lp)) {
- printk(KERN_DEBUG
- "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
- dev->name, lp->a.read_csr(ioaddr, CSR0));
- }
+ netif_printk(lp, tx_queued, KERN_DEBUG, dev,
+ "%s() called, csr0 %4.4x\n",
+ __func__, lp->a.read_csr(ioaddr, CSR0));
/* Default status -- will not enable Successful-TxDone
* interrupt when that option is available to us.
@@ -2558,16 +2476,14 @@ pcnet32_interrupt(int irq, void *dev_id)
csr0 = lp->a.read_csr(ioaddr, CSR0);
while ((csr0 & 0x8f00) && --boguscnt >= 0) {
- if (csr0 == 0xffff) {
+ if (csr0 == 0xffff)
break; /* PCMCIA remove happened */
- }
/* Acknowledge all of the current interrupt sources ASAP. */
lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
- if (netif_msg_intr(lp))
- printk(KERN_DEBUG
- "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
+ netif_printk(lp, intr, KERN_DEBUG, dev,
+ "interrupt csr0=%#2.2x new csr=%#2.2x\n",
+ csr0, lp->a.read_csr(ioaddr, CSR0));
/* Log misc errors. */
if (csr0 & 0x4000)
@@ -2587,10 +2503,8 @@ pcnet32_interrupt(int irq, void *dev_id)
dev->stats.rx_errors++; /* Missed a Rx frame. */
}
if (csr0 & 0x0800) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Bus master arbitration failure, status %4.4x.\n",
- dev->name, csr0);
+ netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n",
+ csr0);
/* unlike for the lance, there is no restart needed */
}
if (napi_schedule_prep(&lp->napi)) {
@@ -2606,9 +2520,9 @@ pcnet32_interrupt(int irq, void *dev_id)
csr0 = lp->a.read_csr(ioaddr, CSR0);
}
- if (netif_msg_intr(lp))
- printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
- dev->name, lp->a.read_csr(ioaddr, CSR0));
+ netif_printk(lp, intr, KERN_DEBUG, dev,
+ "exiting interrupt, csr0=%#4.4x\n",
+ lp->a.read_csr(ioaddr, CSR0));
spin_unlock(&lp->lock);
@@ -2630,10 +2544,9 @@ static int pcnet32_close(struct net_device *dev)
dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
- if (netif_msg_ifdown(lp))
- printk(KERN_DEBUG
- "%s: Shutting down ethercard, status was %2.2x.\n",
- dev->name, lp->a.read_csr(ioaddr, CSR0));
+ netif_printk(lp, ifdown, KERN_DEBUG, dev,
+ "Shutting down ethercard, status was %2.2x\n",
+ lp->a.read_csr(ioaddr, CSR0));
/* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
@@ -2698,7 +2611,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
ib->filter[1] = 0;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
@@ -2730,9 +2643,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
csr15 = lp->a.read_csr(ioaddr, CSR15);
if (dev->flags & IFF_PROMISC) {
/* Log any net taps. */
- if (netif_msg_hw(lp))
- printk(KERN_INFO "%s: Promiscuous mode enabled.\n",
- dev->name);
+ netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
lp->init_block->mode =
cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
7);
@@ -2819,10 +2730,8 @@ static int pcnet32_check_otherphy(struct net_device *dev)
mii.phy_id = i;
if (mii_link_ok(&mii)) {
/* found PHY with active link */
- if (netif_msg_link(lp))
- printk(KERN_INFO
- "%s: Using PHY number %d.\n",
- dev->name, i);
+ netif_info(lp, link, dev, "Using PHY number %d\n",
+ i);
/* isolate inactive phy */
bmcr =
@@ -2868,8 +2777,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
if (!curr_link) {
if (prev_link || verbose) {
netif_carrier_off(dev);
- if (netif_msg_link(lp))
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netif_info(lp, link, dev, "link down\n");
}
if (lp->phycount > 1) {
curr_link = pcnet32_check_otherphy(dev);
@@ -2881,12 +2789,11 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
if (netif_msg_link(lp)) {
struct ethtool_cmd ecmd;
mii_ethtool_gset(&lp->mii_if, &ecmd);
- printk(KERN_INFO
- "%s: link up, %sMbps, %s-duplex\n",
- dev->name,
- (ecmd.speed == SPEED_100) ? "100" : "10",
- (ecmd.duplex ==
- DUPLEX_FULL) ? "full" : "half");
+ netdev_info(dev, "link up, %sMbps, %s-duplex\n",
+ (ecmd.speed == SPEED_100)
+ ? "100" : "10",
+ (ecmd.duplex == DUPLEX_FULL)
+ ? "full" : "half");
}
bcr9 = lp->a.read_bcr(dev->base_addr, 9);
if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
@@ -2897,8 +2804,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
lp->a.write_bcr(dev->base_addr, 9, bcr9);
}
} else {
- if (netif_msg_link(lp))
- printk(KERN_INFO "%s: link up\n", dev->name);
+ netif_info(lp, link, dev, "link up\n");
}
}
}
@@ -3010,7 +2916,7 @@ MODULE_LICENSE("GPL");
static int __init pcnet32_init_module(void)
{
- printk(KERN_INFO "%s", version);
+ pr_info("%s", version);
pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
@@ -3026,7 +2932,7 @@ static int __init pcnet32_init_module(void)
pcnet32_probe_vlbus(pcnet32_portlist);
if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
- printk(KERN_INFO PFX "%d cards_found.\n", cards_found);
+ pr_info("%d cards_found\n", cards_found);
return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
}
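
Most of the pcnet32 churn above replaces open-coded netif_msg_*()/printk() pairs with netif_err(), netif_info() and netif_printk(). Each helper folds the per-driver message-level test and the interface-name prefix into a single call; conceptually (a simplified sketch, not the <linux/netdevice.h> implementation):

/* Simplified sketch of the pattern the netif_* helpers encapsulate. */
#define netif_err_sketch(priv, type, dev, fmt, args...)			\
do {									\
	if (netif_msg_##type(priv))	/* e.g. netif_msg_drv(lp) */	\
		printk(KERN_ERR "%s: " fmt, (dev)->name, ##args);	\
} while (0)

/* So the old
 *	if (netif_msg_drv(lp))
 *		printk(KERN_ERR "%s: Memory allocation failed.\n", dev->name);
 * becomes simply
 *	netif_err(lp, drv, dev, "Memory allocation failed\n");
 */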
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 6f69b9ba0df8..65ed385c2ceb 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -63,6 +63,7 @@
#define MII_M1111_HWCFG_MODE_COPPER_RGMII 0xb
#define MII_M1111_HWCFG_MODE_FIBER_RGMII 0x3
#define MII_M1111_HWCFG_MODE_SGMII_NO_CLK 0x4
+#define MII_M1111_HWCFG_MODE_COPPER_RTBI 0x9
#define MII_M1111_HWCFG_FIBER_COPPER_AUTO 0x8000
#define MII_M1111_HWCFG_FIBER_COPPER_RES 0x2000
@@ -269,6 +270,43 @@ static int m88e1111_config_init(struct phy_device *phydev)
return err;
}
+ if (phydev->interface == PHY_INTERFACE_MODE_RTBI) {
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
+ if (temp < 0)
+ return temp;
+ temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY);
+ err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp);
+ if (err < 0)
+ return err;
+
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+ if (temp < 0)
+ return temp;
+ temp &= ~(MII_M1111_HWCFG_MODE_MASK | MII_M1111_HWCFG_FIBER_COPPER_RES);
+ temp |= 0x7 | MII_M1111_HWCFG_FIBER_COPPER_AUTO;
+ err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+ if (err < 0)
+ return err;
+
+ /* soft reset */
+ err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ if (err < 0)
+ return err;
+ do
+ temp = phy_read(phydev, MII_BMCR);
+ while (temp & BMCR_RESET);
+
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+ if (temp < 0)
+ return temp;
+ temp &= ~(MII_M1111_HWCFG_MODE_MASK | MII_M1111_HWCFG_FIBER_COPPER_RES);
+ temp |= MII_M1111_HWCFG_MODE_COPPER_RTBI | MII_M1111_HWCFG_FIBER_COPPER_AUTO;
+ err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+ if (err < 0)
+ return err;
+ }
+
+
err = phy_write(phydev, MII_BMCR, BMCR_RESET);
if (err < 0)
return err;
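
The RTBI setup above issues a BMCR soft reset and then polls until the self-clearing BMCR_RESET bit drops. The patch loops unconditionally; a bounded variant of the same wait (illustrative only, not part of this change) would look like:

/* Illustrative bounded wait for the self-clearing reset bit. */
int err, temp, retries = 100;

err = phy_write(phydev, MII_BMCR, BMCR_RESET);
if (err < 0)
	return err;
do {
	temp = phy_read(phydev, MII_BMCR);
	if (temp < 0)
		return temp;		/* MDIO read failed */
} while ((temp & BMCR_RESET) && --retries);
if (!retries)
	return -ETIMEDOUT;		/* reset never completed */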
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index adbc0fded130..db1794546c56 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -277,6 +277,22 @@ int phy_device_register(struct phy_device *phydev)
EXPORT_SYMBOL(phy_device_register);
/**
+ * phy_find_first - finds the first PHY device on the bus
+ * @bus: the target MII bus
+ */
+struct phy_device *phy_find_first(struct mii_bus *bus)
+{
+ int addr;
+
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ if (bus->phy_map[addr])
+ return bus->phy_map[addr];
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(phy_find_first);
+
+/**
* phy_prepare_link - prepares the PHY layer to monitor link status
* @phydev: target phy_device struct
* @handler: callback function for link status change notifications
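
A typical consumer of the new phy_find_first() helper is a MAC driver that does not know its PHY address ahead of time; during probe it can simply take the first device present on its MDIO bus. A hypothetical caller (priv->mii_bus, ndev and the surrounding probe function are assumptions, not code from this patch):

/* Hypothetical probe fragment built on the helper added above. */
struct phy_device *phydev;

phydev = phy_find_first(priv->mii_bus);	/* first populated PHY address */
if (!phydev) {
	netdev_err(ndev, "no PHY found on MDIO bus\n");
	return -ENODEV;
}
/* ... hand phydev to phy_connect_direct()/phy_attach() as usual ... */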
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 5123bb954dd7..ed2644a57500 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -25,6 +25,7 @@
#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
#define MII_LAN83C185_IM 30 /* Interrupt Mask */
+#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */
#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
@@ -37,8 +38,10 @@
#define MII_LAN83C185_ISF_INT_ALL (0x0e)
#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
- (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4)
+ (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \
+ MII_LAN83C185_ISF_INT7)
+#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */
static int smsc_phy_config_intr(struct phy_device *phydev)
{
@@ -59,9 +62,23 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
static int smsc_phy_config_init(struct phy_device *phydev)
{
+ int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+ if (rc < 0)
+ return rc;
+
+ /* Enable energy detect mode for this SMSC Transceivers */
+ rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+ rc | MII_LAN83C185_EDPWRDOWN);
+ if (rc < 0)
+ return rc;
+
return smsc_phy_ack_interrupt (phydev);
}
+static int lan911x_config_init(struct phy_device *phydev)
+{
+ return smsc_phy_ack_interrupt(phydev);
+}
static struct phy_driver lan83c185_driver = {
.phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
@@ -147,7 +164,7 @@ static struct phy_driver lan911x_int_driver = {
/* basic functions */
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
- .config_init = smsc_phy_config_init,
+ .config_init = lan911x_config_init,
/* IRQ related */
.ack_interrupt = smsc_phy_ack_interrupt,
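
The smsc_phy_config_init() change is the standard MDIO read-modify-write: read the vendor control/status register, OR in the energy-detect bit, write it back, and propagate any negative error code from the bus accessors. The same pattern in isolation (a generic sketch, register and bit names taken from the hunk above):

/* Generic sketch of the read-modify-write done in config_init above. */
static int smsc_enable_energy_detect_sketch(struct phy_device *phydev)
{
	int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);

	if (rc < 0)
		return rc;		/* MDIO read failed */

	return phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
			 rc | MII_LAN83C185_EDPWRDOWN);
}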
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 2282e729edbe..6d61602208c1 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -167,7 +167,7 @@ struct channel {
u8 avail; /* flag used in multilink stuff */
u8 had_frag; /* >= 1 fragments have been sent */
u32 lastseq; /* MP: last sequence # received */
- int speed; /* speed of the corresponding ppp channel*/
+ int speed; /* speed of the corresponding ppp channel*/
#endif /* CONFIG_PPP_MULTILINK */
};
@@ -1293,13 +1293,13 @@ ppp_push(struct ppp *ppp)
*/
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
{
- int len, totlen;
- int i, bits, hdrlen, mtu;
- int flen;
- int navail, nfree, nzero;
- int nbigger;
- int totspeed;
- int totfree;
+ int len, totlen;
+ int i, bits, hdrlen, mtu;
+ int flen;
+ int navail, nfree, nzero;
+ int nbigger;
+ int totspeed;
+ int totfree;
unsigned char *p, *q;
struct list_head *list;
struct channel *pch;
@@ -1307,21 +1307,21 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
struct ppp_channel *chan;
totspeed = 0; /*total bitrate of the bundle*/
- nfree = 0; /* # channels which have no packet already queued */
- navail = 0; /* total # of usable channels (not deregistered) */
- nzero = 0; /* number of channels with zero speed associated*/
- totfree = 0; /*total # of channels available and
+ nfree = 0; /* # channels which have no packet already queued */
+ navail = 0; /* total # of usable channels (not deregistered) */
+ nzero = 0; /* number of channels with zero speed associated*/
+ totfree = 0; /*total # of channels available and
*having no queued packets before
*starting the fragmentation*/
hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
- i = 0;
- list_for_each_entry(pch, &ppp->channels, clist) {
+ i = 0;
+ list_for_each_entry(pch, &ppp->channels, clist) {
navail += pch->avail = (pch->chan != NULL);
pch->speed = pch->chan->speed;
- if (pch->avail) {
+ if (pch->avail) {
if (skb_queue_empty(&pch->file.xq) ||
- !pch->had_frag) {
+ !pch->had_frag) {
if (pch->speed == 0)
nzero++;
else
@@ -1331,60 +1331,60 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
++nfree;
++totfree;
}
- if (!pch->had_frag && i < ppp->nxchan)
- ppp->nxchan = i;
+ if (!pch->had_frag && i < ppp->nxchan)
+ ppp->nxchan = i;
}
++i;
}
/*
- * Don't start sending this packet unless at least half of
- * the channels are free. This gives much better TCP
- * performance if we have a lot of channels.
+ * Don't start sending this packet unless at least half of
+ * the channels are free. This gives much better TCP
+ * performance if we have a lot of channels.
*/
- if (nfree == 0 || nfree < navail / 2)
- return 0; /* can't take now, leave it in xmit_pending */
+ if (nfree == 0 || nfree < navail / 2)
+ return 0; /* can't take now, leave it in xmit_pending */
/* Do protocol field compression (XXX this should be optional) */
- p = skb->data;
- len = skb->len;
+ p = skb->data;
+ len = skb->len;
if (*p == 0) {
++p;
--len;
}
totlen = len;
- nbigger = len % nfree;
+ nbigger = len % nfree;
- /* skip to the channel after the one we last used
- and start at that one */
+ /* skip to the channel after the one we last used
+ and start at that one */
list = &ppp->channels;
- for (i = 0; i < ppp->nxchan; ++i) {
+ for (i = 0; i < ppp->nxchan; ++i) {
list = list->next;
- if (list == &ppp->channels) {
- i = 0;
+ if (list == &ppp->channels) {
+ i = 0;
break;
}
}
- /* create a fragment for each channel */
+ /* create a fragment for each channel */
bits = B;
- while (len > 0) {
+ while (len > 0) {
list = list->next;
- if (list == &ppp->channels) {
- i = 0;
+ if (list == &ppp->channels) {
+ i = 0;
continue;
}
- pch = list_entry(list, struct channel, clist);
+ pch = list_entry(list, struct channel, clist);
++i;
if (!pch->avail)
continue;
/*
- * Skip this channel if it has a fragment pending already and
- * we haven't given a fragment to all of the free channels.
+ * Skip this channel if it has a fragment pending already and
+ * we haven't given a fragment to all of the free channels.
*/
if (pch->avail == 1) {
- if (nfree > 0)
+ if (nfree > 0)
continue;
} else {
pch->avail = 1;
@@ -1393,32 +1393,32 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
/* check the channel's mtu and whether it is still attached. */
spin_lock_bh(&pch->downl);
if (pch->chan == NULL) {
- /* can't use this channel, it's being deregistered */
+ /* can't use this channel, it's being deregistered */
if (pch->speed == 0)
nzero--;
else
- totspeed -= pch->speed;
+ totspeed -= pch->speed;
spin_unlock_bh(&pch->downl);
pch->avail = 0;
totlen = len;
totfree--;
nfree--;
- if (--navail == 0)
+ if (--navail == 0)
break;
continue;
}
/*
*if the channel speed is not set divide
- *the packet evenly among the free channels;
+ *the packet evenly among the free channels;
*otherwise divide it according to the speed
*of the channel we are going to transmit on
*/
flen = len;
if (nfree > 0) {
if (pch->speed == 0) {
- flen = totlen/nfree ;
+ flen = totlen/nfree;
if (nbigger > 0) {
flen++;
nbigger--;
@@ -1436,8 +1436,8 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
}
/*
- *check if we are on the last channel or
- *we exceded the lenght of the data to
+ *check if we are on the last channel or
+	 *we exceeded the length of the data to
*fragment
*/
if ((nfree <= 0) || (flen > len))
@@ -1448,29 +1448,29 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
*above formula will be equal or less than zero.
*Skip the channel in this case
*/
- if (flen <= 0) {
+ if (flen <= 0) {
pch->avail = 2;
spin_unlock_bh(&pch->downl);
continue;
}
- mtu = pch->chan->mtu - hdrlen;
- if (mtu < 4)
- mtu = 4;
+ mtu = pch->chan->mtu - hdrlen;
+ if (mtu < 4)
+ mtu = 4;
if (flen > mtu)
flen = mtu;
- if (flen == len)
- bits |= E;
- frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
+ if (flen == len)
+ bits |= E;
+ frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
if (!frag)
goto noskb;
- q = skb_put(frag, flen + hdrlen);
+ q = skb_put(frag, flen + hdrlen);
- /* make the MP header */
+ /* make the MP header */
q[0] = PPP_MP >> 8;
q[1] = PPP_MP;
if (ppp->flags & SC_MP_XSHORTSEQ) {
- q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
+ q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
q[3] = ppp->nxseq;
} else {
q[2] = bits;
@@ -1483,24 +1483,24 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
/* try to send it down the channel */
chan = pch->chan;
- if (!skb_queue_empty(&pch->file.xq) ||
+ if (!skb_queue_empty(&pch->file.xq) ||
!chan->ops->start_xmit(chan, frag))
skb_queue_tail(&pch->file.xq, frag);
- pch->had_frag = 1;
+ pch->had_frag = 1;
p += flen;
- len -= flen;
+ len -= flen;
++ppp->nxseq;
bits = 0;
spin_unlock_bh(&pch->downl);
}
- ppp->nxchan = i;
+ ppp->nxchan = i;
return 1;
noskb:
spin_unlock_bh(&pch->downl);
if (ppp->debug & 1)
- printk(KERN_ERR "PPP: no memory (fragment)\n");
+ printk(KERN_ERR "PPP: no memory (fragment)\n");
++ppp->dev->stats.tx_errors;
++ppp->nxseq;
return 1; /* abandon the frame */
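
In the re-indented fragmentation path above, when every free channel reports speed 0 the packet is split evenly: each fragment gets len / nfree bytes, and the first len % nfree fragments carry one extra byte (that is what nbigger tracks). A standalone illustration of that arithmetic (simplified -- the real loop recomputes the share as channels drop out):

/* Simplified even split: 10 bytes over 3 free channels -> 4, 3, 3. */
int len = 10, nfree = 3;
int nbigger = len % nfree;	/* fragments that get one extra byte */
int n;

for (n = 0; n < nfree; n++) {
	int flen = len / nfree;	/* base share */

	if (nbigger > 0) {
		flen++;
		nbigger--;
	}
	/* queue flen bytes of the packet on channel n ... */
}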
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 0c768593aad0..c19dd4a6cd76 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -568,7 +568,7 @@ void gelic_net_set_multi(struct net_device *netdev)
status);
if ((netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > GELIC_NET_MC_COUNT_MAX)) {
+ (netdev_mc_count(netdev) > GELIC_NET_MC_COUNT_MAX)) {
status = lv1_net_add_multicast_address(bus_id(card),
dev_id(card),
0, 1);
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index 227b141c4fbd..2663b2fdc0bb 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -1389,113 +1389,6 @@ static int gelic_wl_get_mode(struct net_device *netdev,
return 0;
}
-#ifdef CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE
-/* SIOCIWFIRSTPRIV */
-static int hex2bin(u8 *str, u8 *bin, unsigned int len)
-{
- unsigned int i;
- static unsigned char *hex = "0123456789ABCDEF";
- unsigned char *p, *q;
- u8 tmp;
-
- if (len != WPA_PSK_LEN * 2)
- return -EINVAL;
-
- for (i = 0; i < WPA_PSK_LEN * 2; i += 2) {
- p = strchr(hex, toupper(str[i]));
- q = strchr(hex, toupper(str[i + 1]));
- if (!p || !q) {
- pr_info("%s: unconvertible PSK digit=%d\n",
- __func__, i);
- return -EINVAL;
- }
- tmp = ((p - hex) << 4) + (q - hex);
- *bin++ = tmp;
- }
- return 0;
-};
-
-static int gelic_wl_priv_set_psk(struct net_device *net_dev,
- struct iw_request_info *info,
- union iwreq_data *data, char *extra)
-{
- struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev));
- unsigned int len;
- unsigned long irqflag;
- int ret = 0;
-
- pr_debug("%s:<- len=%d\n", __func__, data->data.length);
- len = data->data.length - 1;
- if (len <= 2)
- return -EINVAL;
-
- spin_lock_irqsave(&wl->lock, irqflag);
- if (extra[0] == '"' && extra[len - 1] == '"') {
- pr_debug("%s: passphrase mode\n", __func__);
- /* pass phrase */
- if (GELIC_WL_EURUS_PSK_MAX_LEN < (len - 2)) {
- pr_info("%s: passphrase too long\n", __func__);
- ret = -E2BIG;
- goto out;
- }
- memset(wl->psk, 0, sizeof(wl->psk));
- wl->psk_len = len - 2;
- memcpy(wl->psk, &(extra[1]), wl->psk_len);
- wl->psk_type = GELIC_EURUS_WPA_PSK_PASSPHRASE;
- } else {
- ret = hex2bin(extra, wl->psk, len);
- if (ret)
- goto out;
- wl->psk_len = WPA_PSK_LEN;
- wl->psk_type = GELIC_EURUS_WPA_PSK_BIN;
- }
- set_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat);
-out:
- spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s:->\n", __func__);
- return ret;
-}
-
-static int gelic_wl_priv_get_psk(struct net_device *net_dev,
- struct iw_request_info *info,
- union iwreq_data *data, char *extra)
-{
- struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev));
- char *p;
- unsigned long irqflag;
- unsigned int i;
-
- pr_debug("%s:<-\n", __func__);
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- spin_lock_irqsave(&wl->lock, irqflag);
- p = extra;
- if (test_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat)) {
- if (wl->psk_type == GELIC_EURUS_WPA_PSK_BIN) {
- for (i = 0; i < wl->psk_len; i++) {
- sprintf(p, "%02xu", wl->psk[i]);
- p += 2;
- }
- *p = '\0';
- data->data.length = wl->psk_len * 2;
- } else {
- *p++ = '"';
- memcpy(p, wl->psk, wl->psk_len);
- p += wl->psk_len;
- *p++ = '"';
- *p = '\0';
- data->data.length = wl->psk_len + 2;
- }
- } else
- /* no psk set */
- data->data.length = 0;
- spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s:-> %d\n", __func__, data->data.length);
- return 0;
-}
-#endif
-
/* SIOCGIWNICKN */
static int gelic_wl_get_nick(struct net_device *net_dev,
struct iw_request_info *info,
@@ -1571,8 +1464,10 @@ static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan,
init_completion(&wl->scan_done);
/*
* If we have already a bss list, don't try to get new
+ * unless we are doing an ESSID scan
*/
- if (!always_scan && wl->scan_stat == GELIC_WL_SCAN_STAT_GOT_LIST) {
+ if ((!essid_len && !always_scan)
+ && wl->scan_stat == GELIC_WL_SCAN_STAT_GOT_LIST) {
pr_debug("%s: already has the list\n", __func__);
complete(&wl->scan_done);
goto out;
@@ -1673,7 +1568,7 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
}
}
- /* put them in the newtork_list */
+ /* put them in the network_list */
for (i = 0, scan_info_size = 0, scan_info = buf;
scan_info_size < data_len;
i++, scan_info_size += be16_to_cpu(scan_info->size),
@@ -2009,7 +1904,7 @@ static int gelic_wl_do_wpa_setup(struct gelic_wl_info *wl)
/* PSK type */
wpa->psk_type = cpu_to_be16(wl->psk_type);
#ifdef DEBUG
- pr_debug("%s: sec=%s psktype=%s\nn", __func__,
+ pr_debug("%s: sec=%s psktype=%s\n", __func__,
wpasecstr(wpa->security),
(wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ?
"BIN" : "passphrase");
@@ -2019,9 +1914,9 @@ static int gelic_wl_do_wpa_setup(struct gelic_wl_info *wl)
* the debug log because this dumps your precious
* passphrase/key.
*/
- pr_debug("%s: psk=%s\n",
+ pr_debug("%s: psk=%s\n", __func__,
(wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ?
- (char *)"N/A" : (char *)wpa->psk);
+ "N/A" : wpa->psk);
#endif
#endif
/* issue wpa setup */
@@ -2406,40 +2301,10 @@ static const iw_handler gelic_wl_wext_handler[] =
IW_IOCTL(SIOCGIWNICKN) = gelic_wl_get_nick,
};
-#ifdef CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE
-static struct iw_priv_args gelic_wl_private_args[] =
-{
- {
- .cmd = GELIC_WL_PRIV_SET_PSK,
- .set_args = IW_PRIV_TYPE_CHAR |
- (GELIC_WL_EURUS_PSK_MAX_LEN + 2),
- .name = "set_psk"
- },
- {
- .cmd = GELIC_WL_PRIV_GET_PSK,
- .get_args = IW_PRIV_TYPE_CHAR |
- (GELIC_WL_EURUS_PSK_MAX_LEN + 2),
- .name = "get_psk"
- }
-};
-
-static const iw_handler gelic_wl_private_handler[] =
-{
- gelic_wl_priv_set_psk,
- gelic_wl_priv_get_psk,
-};
-#endif
-
static const struct iw_handler_def gelic_wl_wext_handler_def = {
.num_standard = ARRAY_SIZE(gelic_wl_wext_handler),
.standard = gelic_wl_wext_handler,
.get_wireless_stats = gelic_wl_get_wireless_stats,
-#ifdef CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE
- .num_private = ARRAY_SIZE(gelic_wl_private_handler),
- .num_private_args = ARRAY_SIZE(gelic_wl_private_args),
- .private = gelic_wl_private_handler,
- .private_args = gelic_wl_private_args,
-#endif
};
static struct net_device * __devinit gelic_wl_alloc(struct gelic_card *card)
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index dd35066a7f8d..4ef0afbcbe1b 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -61,7 +61,7 @@ static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
-static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
/* required last entry */
@@ -4087,7 +4087,6 @@ static void __devexit ql3xxx_remove(struct pci_dev *pdev)
struct ql3_adapter *qdev = netdev_priv(ndev);
unregister_netdev(ndev);
- qdev = netdev_priv(ndev);
ql_disable_interrupts(qdev);
diff --git a/drivers/net/qlcnic/Makefile b/drivers/net/qlcnic/Makefile
new file mode 100644
index 000000000000..ddba83ef3f44
--- /dev/null
+++ b/drivers/net/qlcnic/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for Qlogic 1G/10G Ethernet Driver for CNA devices
+#
+
+obj-$(CONFIG_QLCNIC) := qlcnic.o
+
+qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
+ qlcnic_ethtool.o qlcnic_ctx.o
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
new file mode 100644
index 000000000000..b40a851ec7d1
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -0,0 +1,1126 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#ifndef _QLCNIC_H_
+#define _QLCNIC_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/timer.h>
+
+#include <linux/vmalloc.h>
+
+#include <linux/io.h>
+#include <asm/byteorder.h>
+
+#include "qlcnic_hdr.h"
+
+#define _QLCNIC_LINUX_MAJOR 5
+#define _QLCNIC_LINUX_MINOR 0
+#define _QLCNIC_LINUX_SUBVERSION 0
+#define QLCNIC_LINUX_VERSIONID "5.0.0"
+
+#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
+#define _major(v) (((v) >> 24) & 0xff)
+#define _minor(v) (((v) >> 16) & 0xff)
+#define _build(v) ((v) & 0xffff)
+
+/* version in image has weird encoding:
+ * 7:0 - major
+ * 15:8 - minor
+ * 31:16 - build (little endian)
+ */
+#define QLCNIC_DECODE_VERSION(v) \
+ QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
+
+#define QLCNIC_NUM_FLASH_SECTORS (64)
+#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024)
+#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \
+ * QLCNIC_FLASH_SECTOR_SIZE)
+
+#define RCV_DESC_RINGSIZE(rds_ring) \
+ (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
+#define RCV_BUFF_RINGSIZE(rds_ring) \
+ (sizeof(struct qlcnic_rx_buffer) * rds_ring->num_desc)
+#define STATUS_DESC_RINGSIZE(sds_ring) \
+ (sizeof(struct status_desc) * (sds_ring)->num_desc)
+#define TX_BUFF_RINGSIZE(tx_ring) \
+ (sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc)
+#define TX_DESC_RINGSIZE(tx_ring) \
+ (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
+
+#define QLCNIC_P3P_A0 0x50
+
+#define QLCNIC_IS_REVISION_P3P(REVISION) (REVISION >= QLCNIC_P3P_A0)
+
+#define FIRST_PAGE_GROUP_START 0
+#define FIRST_PAGE_GROUP_END 0x100000
+
+#define P3_MAX_MTU (9600)
+#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */
+
+#define QLCNIC_P3_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN)
+#define QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3_MAX_MTU)
+#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
+#define QLCNIC_LRO_BUFFER_EXTRA 2048
+
+#define QLCNIC_RX_LRO_BUFFER_LENGTH (8060)
+
+/* Opcodes to be used with the commands */
+#define TX_ETHER_PKT 0x01
+#define TX_TCP_PKT 0x02
+#define TX_UDP_PKT 0x03
+#define TX_IP_PKT 0x04
+#define TX_TCP_LSO 0x05
+#define TX_TCP_LSO6 0x06
+#define TX_IPSEC 0x07
+#define TX_IPSEC_CMD 0x0a
+#define TX_TCPV6_PKT 0x0b
+#define TX_UDPV6_PKT 0x0c
+
+/* Tx defines */
+#define MAX_BUFFERS_PER_CMD 32
+#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + 4)
+#define QLCNIC_MAX_TX_TIMEOUTS 2
+
+/*
+ * Following are the states of the Phantom. Phantom will set them and
+ * Host will read to check if the fields are correct.
+ */
+#define PHAN_INITIALIZE_FAILED 0xffff
+#define PHAN_INITIALIZE_COMPLETE 0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK 0xf00f
+#define PHAN_PEG_RCV_INITIALIZED 0xff01
+
+#define NUM_RCV_DESC_RINGS 3
+#define NUM_STS_DESC_RINGS 4
+
+#define RCV_RING_NORMAL 0
+#define RCV_RING_JUMBO 1
+#define RCV_RING_LRO 2
+
+#define MIN_CMD_DESCRIPTORS 64
+#define MIN_RCV_DESCRIPTORS 64
+#define MIN_JUMBO_DESCRIPTORS 32
+
+#define MAX_CMD_DESCRIPTORS 1024
+#define MAX_RCV_DESCRIPTORS_1G 4096
+#define MAX_RCV_DESCRIPTORS_10G 8192
+#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
+#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
+#define MAX_LRO_RCV_DESCRIPTORS 8
+
+#define DEFAULT_RCV_DESCRIPTORS_1G 2048
+#define DEFAULT_RCV_DESCRIPTORS_10G 4096
+
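+/*
+ * Note: the AND below is a cheap modulo and only works when (length) is a
+ * power of two; ring sizes are kept as powers of two (see the
+ * roundup_pow_of_two() call in qlcnic_validate_ringparam()).
+ */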
+#define get_next_index(index, length) \
+ (((index) + 1) & ((length) - 1))
+
+#define MPORT_MULTI_FUNCTION_MODE 0x2222
+
+/*
+ * Following data structures describe the descriptors that will be used.
+ * Added fields tcpHdrSize and ipHdrSize; the driver needs to fill them
+ * only when doing LSO (i.e. for packets larger than the standard MTU).
+ */
+
+#define FLAGS_VLAN_TAGGED 0x10
+#define FLAGS_VLAN_OOB 0x40
+
+#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
+ (cmd_desc)->vlan_TCI = cpu_to_le16(v);
+#define qlcnic_set_cmd_desc_port(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
+#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
+
+#define qlcnic_set_tx_port(_desc, _port) \
+ ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
+
+#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
+ ((_desc)->flags_opcode = \
+ cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
+
+#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
+ ((_desc)->nfrags__length = \
+ cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
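+
+/*
+ * Example (illustrative): qlcnic_set_tx_flags_opcode(desc, FLAGS_VLAN_OOB,
+ * TX_TCP_LSO) stores cpu_to_le16((0x40 & 0x7f) | ((0x05 & 0x3f) << 7)),
+ * i.e. 0x02c0, in desc->flags_opcode.
+ */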
+
+struct cmd_desc_type0 {
+ u8 tcp_hdr_offset; /* For LSO only */
+ u8 ip_hdr_offset; /* For LSO only */
+ __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */
+ __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */
+
+ __le64 addr_buffer2;
+
+ __le16 reference_handle;
+ __le16 mss;
+ u8 port_ctxid; /* 7:4 ctxid 3:0 port */
+ u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
+ __le16 conn_id; /* IPSec offload only */
+
+ __le64 addr_buffer3;
+ __le64 addr_buffer1;
+
+ __le16 buffer_length[4];
+
+ __le64 addr_buffer4;
+
+ __le32 reserved2;
+ __le16 reserved;
+ __le16 vlan_TCI;
+
+} __attribute__ ((aligned(64)));
+
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
+struct rcv_desc {
+ __le16 reference_handle;
+ __le16 reserved;
+ __le32 buffer_length; /* allocated buffer length (usually 2K) */
+ __le64 addr_buffer;
+};
+
+/* opcode field in status_desc */
+#define QLCNIC_SYN_OFFLOAD 0x03
+#define QLCNIC_RXPKT_DESC 0x04
+#define QLCNIC_OLD_RXPKT_DESC 0x3f
+#define QLCNIC_RESPONSE_DESC 0x05
+#define QLCNIC_LRO_DESC 0x12
+
+/* for status field in status_desc */
+#define STATUS_CKSUM_OK (2)
+
+/* owner bits of status_desc */
+#define STATUS_OWNER_HOST (0x1ULL << 56)
+#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
+
+/* Status descriptor:
+ 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+ 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
+ 53-55 desc_cnt, 56-57 owner, 58-63 opcode
+ */
+#define qlcnic_get_sts_port(sts_data) \
+ ((sts_data) & 0x0F)
+#define qlcnic_get_sts_status(sts_data) \
+ (((sts_data) >> 4) & 0x0F)
+#define qlcnic_get_sts_type(sts_data) \
+ (((sts_data) >> 8) & 0x0F)
+#define qlcnic_get_sts_totallength(sts_data) \
+ (((sts_data) >> 12) & 0xFFFF)
+#define qlcnic_get_sts_refhandle(sts_data) \
+ (((sts_data) >> 28) & 0xFFFF)
+#define qlcnic_get_sts_prot(sts_data) \
+ (((sts_data) >> 44) & 0x0F)
+#define qlcnic_get_sts_pkt_offset(sts_data) \
+ (((sts_data) >> 48) & 0x1F)
+#define qlcnic_get_sts_desc_cnt(sts_data) \
+ (((sts_data) >> 53) & 0x7)
+#define qlcnic_get_sts_opcode(sts_data) \
+ (((sts_data) >> 58) & 0x03F)
+
+#define qlcnic_get_lro_sts_refhandle(sts_data) \
+ ((sts_data) & 0x0FFFF)
+#define qlcnic_get_lro_sts_length(sts_data) \
+ (((sts_data) >> 16) & 0x0FFFF)
+#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \
+ (((sts_data) >> 32) & 0x0FF)
+#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \
+ (((sts_data) >> 40) & 0x0FF)
+#define qlcnic_get_lro_sts_timestamp(sts_data) \
+ (((sts_data) >> 48) & 0x1)
+#define qlcnic_get_lro_sts_type(sts_data) \
+ (((sts_data) >> 49) & 0x7)
+#define qlcnic_get_lro_sts_push_flag(sts_data) \
+ (((sts_data) >> 52) & 0x1)
+#define qlcnic_get_lro_sts_seq_number(sts_data) \
+ ((sts_data) & 0x0FFFFFFFF)
+
+
+struct status_desc {
+ __le64 status_desc_data[2];
+} __attribute__ ((aligned(16)));
+
+/* UNIFIED ROMIMAGE */
+#define QLCNIC_UNI_FW_MIN_SIZE 0xc8000
+#define QLCNIC_UNI_DIR_SECT_PRODUCT_TBL 0x0
+#define QLCNIC_UNI_DIR_SECT_BOOTLD 0x6
+#define QLCNIC_UNI_DIR_SECT_FW 0x7
+
+/*Offsets */
+#define QLCNIC_UNI_CHIP_REV_OFF 10
+#define QLCNIC_UNI_FLAGS_OFF 11
+#define QLCNIC_UNI_BIOS_VERSION_OFF 12
+#define QLCNIC_UNI_BOOTLD_IDX_OFF 27
+#define QLCNIC_UNI_FIRMWARE_IDX_OFF 29
+
+struct uni_table_desc{
+ u32 findex;
+ u32 num_entries;
+ u32 entry_size;
+ u32 reserved[5];
+};
+
+struct uni_data_desc{
+ u32 findex;
+ u32 size;
+ u32 reserved[5];
+};
+
+/* Magic number to let user know flash is programmed */
+#define QLCNIC_BDINFO_MAGIC 0x12345678
+
+#define QLCNIC_BRDTYPE_P3_REF_QG 0x0021
+#define QLCNIC_BRDTYPE_P3_HMEZ 0x0022
+#define QLCNIC_BRDTYPE_P3_10G_CX4_LP 0x0023
+#define QLCNIC_BRDTYPE_P3_4_GB 0x0024
+#define QLCNIC_BRDTYPE_P3_IMEZ 0x0025
+#define QLCNIC_BRDTYPE_P3_10G_SFP_PLUS 0x0026
+#define QLCNIC_BRDTYPE_P3_10000_BASE_T 0x0027
+#define QLCNIC_BRDTYPE_P3_XG_LOM 0x0028
+#define QLCNIC_BRDTYPE_P3_4_GB_MM 0x0029
+#define QLCNIC_BRDTYPE_P3_10G_SFP_CT 0x002a
+#define QLCNIC_BRDTYPE_P3_10G_SFP_QT 0x002b
+#define QLCNIC_BRDTYPE_P3_10G_CX4 0x0031
+#define QLCNIC_BRDTYPE_P3_10G_XFP 0x0032
+#define QLCNIC_BRDTYPE_P3_10G_TP 0x0080
+
+/* Flash memory map */
+#define QLCNIC_BRDCFG_START 0x4000 /* board config */
+#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
+#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
+#define QLCNIC_USER_START 0x3E8000 /* Firmware info */
+
+#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
+#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
+#define QLCNIC_FW_SERIAL_NUM_OFFSET (QLCNIC_USER_START+0x81c)
+#define QLCNIC_BIOS_VERSION_OFFSET (QLCNIC_USER_START+0x83c)
+
+#define QLCNIC_BRDTYPE_OFFSET (QLCNIC_BRDCFG_START+0x8)
+#define QLCNIC_FW_MAGIC_OFFSET (QLCNIC_BRDCFG_START+0x128)
+
+#define QLCNIC_FW_MIN_SIZE (0x3fffff)
+#define QLCNIC_UNIFIED_ROMIMAGE 0
+#define QLCNIC_FLASH_ROMIMAGE 1
+#define QLCNIC_UNKNOWN_ROMIMAGE 0xff
+
+#define QLCNIC_UNIFIED_ROMIMAGE_NAME "phanfw.bin"
+#define QLCNIC_FLASH_ROMIMAGE_NAME "flash"
+
+extern char qlcnic_driver_name[];
+
+/* Number of status descriptors to handle per interrupt */
+#define MAX_STATUS_HANDLE (64)
+
+/*
+ * qlcnic_skb_frag{} holds the DMA mapping info for one SG list entry. The
+ * mapping has to be released when the DMA is complete. It is embedded in
+ * qlcnic_cmd_buffer{}.
+ */
+struct qlcnic_skb_frag {
+ u64 dma;
+ u64 length;
+};
+
+struct qlcnic_recv_crb {
+ u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
+ u32 crb_sts_consumer[NUM_STS_DESC_RINGS];
+ u32 sw_int_mask[NUM_STS_DESC_RINGS];
+};
+
+/* Following defines are for the state of the buffers */
+#define QLCNIC_BUFFER_FREE 0
+#define QLCNIC_BUFFER_BUSY 1
+
+/*
+ * There will be one qlcnic_cmd_buffer per skb packet. These are used to
+ * save the DMA info needed for pci_unmap_page().
+ */
+struct qlcnic_cmd_buffer {
+ struct sk_buff *skb;
+ struct qlcnic_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
+ u32 frag_count;
+};
+
+/* In rx_buffer, multiple fragments are not needed as it is a single buffer */
+struct qlcnic_rx_buffer {
+ struct list_head list;
+ struct sk_buff *skb;
+ u64 dma;
+ u16 ref_handle;
+ u16 state;
+};
+
+/* Board types */
+#define QLCNIC_GBE 0x01
+#define QLCNIC_XGBE 0x02
+
+/*
+ * One hardware_context{} per adapter
+ * contains interrupt info as well as shared hardware info.
+ */
+struct qlcnic_hardware_context {
+ void __iomem *pci_base0;
+ void __iomem *ocm_win_crb;
+
+ unsigned long pci_len0;
+
+ u32 ocm_win;
+ u32 crb_win;
+
+ rwlock_t crb_lock;
+ struct mutex mem_lock;
+
+ u8 cut_through;
+ u8 revision_id;
+ u8 pci_func;
+ u8 linkup;
+ u16 port_type;
+ u16 board_type;
+};
+
+struct qlcnic_adapter_stats {
+ u64 xmitcalled;
+ u64 xmitfinished;
+ u64 rxdropped;
+ u64 txdropped;
+ u64 csummed;
+ u64 rx_pkts;
+ u64 lro_pkts;
+ u64 rxbytes;
+ u64 txbytes;
+};
+
+/*
+ * Rcv Descriptor ring context. One such structure per Rcv Descriptor ring.
+ * There is one ring for normal packets, one for jumbo packets, and possibly
+ * others.
+ */
+struct qlcnic_host_rds_ring {
+ u32 producer;
+ u32 num_desc;
+ u32 dma_size;
+ u32 skb_size;
+ u32 flags;
+ void __iomem *crb_rcv_producer;
+ struct rcv_desc *desc_head;
+ struct qlcnic_rx_buffer *rx_buf_arr;
+ struct list_head free_list;
+ spinlock_t lock;
+ dma_addr_t phys_addr;
+};
+
+struct qlcnic_host_sds_ring {
+ u32 consumer;
+ u32 num_desc;
+ void __iomem *crb_sts_consumer;
+ void __iomem *crb_intr_mask;
+
+ struct status_desc *desc_head;
+ struct qlcnic_adapter *adapter;
+ struct napi_struct napi;
+ struct list_head free_list[NUM_RCV_DESC_RINGS];
+
+ int irq;
+
+ dma_addr_t phys_addr;
+ char name[IFNAMSIZ+4];
+};
+
+struct qlcnic_host_tx_ring {
+ u32 producer;
+ __le32 *hw_consumer;
+ u32 sw_consumer;
+ void __iomem *crb_cmd_producer;
+ u32 num_desc;
+
+ struct netdev_queue *txq;
+
+ struct qlcnic_cmd_buffer *cmd_buf_arr;
+ struct cmd_desc_type0 *desc_head;
+ dma_addr_t phys_addr;
+ dma_addr_t hw_cons_phys_addr;
+};
+
+/*
+ * Receive context. There is one such structure per instance of the
+ * receive processing. Any state information that is relevant to
+ * the receive, and is must be in this structure. The global data may be
+ * present elsewhere.
+ */
+struct qlcnic_recv_context {
+ u32 state;
+ u16 context_id;
+ u16 virt_port;
+
+ struct qlcnic_host_rds_ring *rds_rings;
+ struct qlcnic_host_sds_ring *sds_rings;
+};
+
+/* HW context creation */
+
+#define QLCNIC_OS_CRB_RETRY_COUNT 4000
+#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \
+ (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
+
+#define QLCNIC_CDRP_CMD_BIT 0x80000000
+
+/*
+ * All responses must have the QLCNIC_CDRP_CMD_BIT cleared
+ * in the crb QLCNIC_CDRP_CRB_OFFSET.
+ */
+#define QLCNIC_CDRP_FORM_RSP(rsp) (rsp)
+#define QLCNIC_CDRP_IS_RSP(rsp) (((rsp) & QLCNIC_CDRP_CMD_BIT) == 0)
+
+#define QLCNIC_CDRP_RSP_OK 0x00000001
+#define QLCNIC_CDRP_RSP_FAIL 0x00000002
+#define QLCNIC_CDRP_RSP_TIMEOUT 0x00000003
+
+/*
+ * All commands must have the QLCNIC_CDRP_CMD_BIT set in
+ * the crb QLCNIC_CDRP_CRB_OFFSET.
+ */
+#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd))
+#define QLCNIC_CDRP_IS_CMD(cmd) (((cmd) & QLCNIC_CDRP_CMD_BIT) != 0)
+
+#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001
+#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002
+#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003
+#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004
+#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX 0x00000005
+#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX 0x00000006
+#define QLCNIC_CDRP_CMD_CREATE_RX_CTX 0x00000007
+#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008
+#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009
+#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
+#define QLCNIC_CDRP_CMD_SETUP_STATISTICS 0x0000000e
+#define QLCNIC_CDRP_CMD_GET_STATISTICS 0x0000000f
+#define QLCNIC_CDRP_CMD_DELETE_STATISTICS 0x00000010
+#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012
+#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013
+#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014
+#define QLCNIC_CDRP_CMD_READ_HW_REG 0x00000015
+#define QLCNIC_CDRP_CMD_GET_FLOW_CTL 0x00000016
+#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017
+#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018
+#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019
+#define QLCNIC_CDRP_CMD_CONFIGURE_TOE 0x0000001a
+#define QLCNIC_CDRP_CMD_FUNC_ATTRIB 0x0000001b
+#define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c
+#define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d
+#define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e
+#define QLCNIC_CDRP_CMD_MAX 0x0000001f
+
+#define QLCNIC_RCODE_SUCCESS 0
+#define QLCNIC_RCODE_TIMEOUT 17
+#define QLCNIC_DESTROY_CTX_RESET 0
+
+/*
+ * Capabilities Announced
+ */
+#define QLCNIC_CAP0_LEGACY_CONTEXT (1)
+#define QLCNIC_CAP0_LEGACY_MN (1 << 2)
+#define QLCNIC_CAP0_LSO (1 << 6)
+#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
+#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
+
+/*
+ * Context state
+ */
+#define QLCHAL_VERSION 1
+
+#define QLCNIC_HOST_CTX_STATE_ACTIVE 2
+
+/*
+ * Rx context
+ */
+
+struct qlcnic_hostrq_sds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le32 ring_size; /* Ring entries */
+ __le16 msi_index;
+ __le16 rsvd; /* Padding */
+};
+
+struct qlcnic_hostrq_rds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le64 buff_size; /* Packet buffer size */
+ __le32 ring_size; /* Ring entries */
+ __le32 ring_kind; /* Class of ring */
+};
+
+struct qlcnic_hostrq_rx_ctx {
+ __le64 host_rsp_dma_addr; /* Response dma'd here */
+ __le32 capabilities[4]; /* Flag bit vector */
+ __le32 host_int_crb_mode; /* Interrupt crb usage */
+ __le32 host_rds_crb_mode; /* RDS crb usage */
+ /* These ring offsets are relative to data[0] below */
+ __le32 rds_ring_offset; /* Offset to RDS config */
+ __le32 sds_ring_offset; /* Offset to SDS config */
+ __le16 num_rds_rings; /* Count of RDS rings */
+ __le16 num_sds_rings; /* Count of SDS rings */
+ __le16 rsvd1; /* Padding */
+ __le16 rsvd2; /* Padding */
+ u8 reserved[128]; /* reserve space for future expansion*/
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N hostrq_rds_rings
+ - N hostrq_sds_rings */
+ char data[0];
+};
+
+struct qlcnic_cardrsp_rds_ring{
+ __le32 host_producer_crb; /* Crb to use */
+ __le32 rsvd1; /* Padding */
+};
+
+struct qlcnic_cardrsp_sds_ring {
+ __le32 host_consumer_crb; /* Crb to use */
+ __le32 interrupt_crb; /* Crb to use */
+};
+
+struct qlcnic_cardrsp_rx_ctx {
+ /* These ring offsets are relative to data[0] below */
+ __le32 rds_ring_offset; /* Offset to RDS config */
+ __le32 sds_ring_offset; /* Offset to SDS config */
+ __le32 host_ctx_state; /* Starting State */
+ __le32 num_fn_per_port; /* How many PCI fn share the port */
+ __le16 num_rds_rings; /* Count of RDS rings */
+ __le16 num_sds_rings; /* Count of SDS rings */
+ __le16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ u8 reserved[128]; /* save space for future expansion */
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N cardrsp_rds_rings
+ - N cardrs_sds_rings */
+ char data[0];
+};
+
+#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
+ (sizeof(HOSTRQ_RX) + \
+ (rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) + \
+ (sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring)))
+
+#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \
+ (sizeof(CARDRSP_RX) + \
+ (rds_rings)*(sizeof(struct qlcnic_cardrsp_rds_ring)) + \
+ (sds_rings)*(sizeof(struct qlcnic_cardrsp_sds_ring)))
+
+/*
+ * Tx context
+ */
+
+struct qlcnic_hostrq_cds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le32 ring_size; /* Ring entries */
+ __le32 rsvd; /* Padding */
+};
+
+struct qlcnic_hostrq_tx_ctx {
+ __le64 host_rsp_dma_addr; /* Response dma'd here */
+ __le64 cmd_cons_dma_addr; /* */
+ __le64 dummy_dma_addr; /* */
+ __le32 capabilities[4]; /* Flag bit vector */
+ __le32 host_int_crb_mode; /* Interrupt crb usage */
+ __le32 rsvd1; /* Padding */
+ __le16 rsvd2; /* Padding */
+ __le16 interrupt_ctl;
+ __le16 msi_index;
+ __le16 rsvd3; /* Padding */
+ struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */
+ u8 reserved[128]; /* future expansion */
+};
+
+struct qlcnic_cardrsp_cds_ring {
+ __le32 host_producer_crb; /* Crb to use */
+ __le32 interrupt_crb; /* Crb to use */
+};
+
+struct qlcnic_cardrsp_tx_ctx {
+ __le32 host_ctx_state; /* Starting state */
+ __le16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */
+ u8 reserved[128]; /* future expansion */
+};
+
+#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
+#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
+
+/* CRB */
+
+#define QLCNIC_HOST_RDS_CRB_MODE_UNIQUE 0
+#define QLCNIC_HOST_RDS_CRB_MODE_SHARED 1
+#define QLCNIC_HOST_RDS_CRB_MODE_CUSTOM 2
+#define QLCNIC_HOST_RDS_CRB_MODE_MAX 3
+
+#define QLCNIC_HOST_INT_CRB_MODE_UNIQUE 0
+#define QLCNIC_HOST_INT_CRB_MODE_SHARED 1
+#define QLCNIC_HOST_INT_CRB_MODE_NORX 2
+#define QLCNIC_HOST_INT_CRB_MODE_NOTX 3
+#define QLCNIC_HOST_INT_CRB_MODE_NORXTX 4
+
+
+/* MAC */
+
+#define MC_COUNT_P3 38
+
+#define QLCNIC_MAC_NOOP 0
+#define QLCNIC_MAC_ADD 1
+#define QLCNIC_MAC_DEL 2
+
+struct qlcnic_mac_list_s {
+ struct list_head list;
+ uint8_t mac_addr[ETH_ALEN+2];
+};
+
+/*
+ * Interrupt coalescing defaults. The defaults are for a 1500 byte MTU and
+ * are adjusted based on the configured MTU.
+ */
+#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3
+#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256
+#define QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS 64
+#define QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US 4
+
+#define QLCNIC_INTR_DEFAULT 0x04
+
+union qlcnic_nic_intr_coalesce_data {
+ struct {
+ u16 rx_packets;
+ u16 rx_time_us;
+ u16 tx_packets;
+ u16 tx_time_us;
+ } data;
+ u64 word;
+};
+
+struct qlcnic_nic_intr_coalesce {
+ u16 stats_time_us;
+ u16 rate_sample_time;
+ u16 flags;
+ u16 rsvd_1;
+ u32 low_threshold;
+ u32 high_threshold;
+ union qlcnic_nic_intr_coalesce_data normal;
+ union qlcnic_nic_intr_coalesce_data low;
+ union qlcnic_nic_intr_coalesce_data high;
+ union qlcnic_nic_intr_coalesce_data irq;
+};
+
+#define QLCNIC_HOST_REQUEST 0x13
+#define QLCNIC_REQUEST 0x14
+
+#define QLCNIC_MAC_EVENT 0x1
+
+#define QLCNIC_IP_UP 2
+#define QLCNIC_IP_DOWN 3
+
+/*
+ * Driver --> Firmware
+ */
+#define QLCNIC_H2C_OPCODE_START 0
+#define QLCNIC_H2C_OPCODE_CONFIG_RSS 1
+#define QLCNIC_H2C_OPCODE_CONFIG_RSS_TBL 2
+#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3
+#define QLCNIC_H2C_OPCODE_CONFIG_LED 4
+#define QLCNIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5
+#define QLCNIC_H2C_OPCODE_CONFIG_L2_MAC 6
+#define QLCNIC_H2C_OPCODE_LRO_REQUEST 7
+#define QLCNIC_H2C_OPCODE_GET_SNMP_STATS 8
+#define QLCNIC_H2C_OPCODE_PROXY_START_REQUEST 9
+#define QLCNIC_H2C_OPCODE_PROXY_STOP_REQUEST 10
+#define QLCNIC_H2C_OPCODE_PROXY_SET_MTU 11
+#define QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12
+#define QLCNIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13
+#define QLCNIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14
+#define QLCNIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15
+#define QLCNIC_H2C_OPCODE_GET_NET_STATS 16
+#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
+#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 18
+#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 19
+#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE 20
+#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 21
+#define QLCNIC_C2C_OPCODE 22
+#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 23
+#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 24
+#define QLCNIC_H2C_OPCODE_LAST 25
+/*
+ * Firmware --> Driver
+ */
+
+#define QLCNIC_C2H_OPCODE_START 128
+#define QLCNIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129
+#define QLCNIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130
+#define QLCNIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131
+#define QLCNIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132
+#define QLCNIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133
+#define QLCNIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134
+#define QLCNIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135
+#define QLCNIC_C2H_OPCODE_GET_SNMP_STATS 136
+#define QLCNIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137
+#define QLCNIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138
+#define QLCNIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139
+#define QLCNIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140
+#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
+#define QLCNIC_C2H_OPCODE_LAST 142
+
+#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
+#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
+#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
+
+#define QLCNIC_LRO_REQUEST_CLEANUP 4
+
+/* Capabilities received */
+#define QLCNIC_FW_CAPABILITY_BDG (1 << 8)
+#define QLCNIC_FW_CAPABILITY_FVLANTX (1 << 9)
+#define QLCNIC_FW_CAPABILITY_HW_LRO (1 << 10)
+
+/* module types */
+#define LINKEVENT_MODULE_NOT_PRESENT 1
+#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
+#define LINKEVENT_MODULE_OPTICAL_SRLR 3
+#define LINKEVENT_MODULE_OPTICAL_LRM 4
+#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7
+#define LINKEVENT_MODULE_TWINAX 8
+
+#define LINKSPEED_10GBPS 10000
+#define LINKSPEED_1GBPS 1000
+#define LINKSPEED_100MBPS 100
+#define LINKSPEED_10MBPS 10
+
+#define LINKSPEED_ENCODED_10MBPS 0
+#define LINKSPEED_ENCODED_100MBPS 1
+#define LINKSPEED_ENCODED_1GBPS 2
+
+#define LINKEVENT_AUTONEG_DISABLED 0
+#define LINKEVENT_AUTONEG_ENABLED 1
+
+#define LINKEVENT_HALF_DUPLEX 0
+#define LINKEVENT_FULL_DUPLEX 1
+
+#define LINKEVENT_LINKSPEED_MBPS 0
+#define LINKEVENT_LINKSPEED_ENCODED 1
+
+#define AUTO_FW_RESET_ENABLED 0x01
+/* firmware response header:
+ * 63:58 - message type
+ * 57:56 - owner
+ * 55:53 - desc count
+ * 52:48 - reserved
+ * 47:40 - completion id
+ * 39:32 - opcode
+ * 31:16 - error code
+ * 15:00 - reserved
+ */
+#define qlcnic_get_nic_msg_opcode(msg_hdr) \
+ ((msg_hdr >> 32) & 0xFF)
+
+struct qlcnic_fw_msg {
+ union {
+ struct {
+ u64 hdr;
+ u64 body[7];
+ };
+ u64 words[8];
+ };
+};
+
+struct qlcnic_nic_req {
+ __le64 qhdr;
+ __le64 req_hdr;
+ __le64 words[6];
+};
+
+struct qlcnic_mac_req {
+ u8 op;
+ u8 tag;
+ u8 mac_addr[6];
+};
+
+#define QLCNIC_MSI_ENABLED 0x02
+#define QLCNIC_MSIX_ENABLED 0x04
+#define QLCNIC_LRO_ENABLED 0x08
+#define QLCNIC_BRIDGE_ENABLED 0x10
+#define QLCNIC_DIAG_ENABLED 0x20
+#define QLCNIC_IS_MSI_FAMILY(adapter) \
+ ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
+
+#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS
+#define QLCNIC_MSIX_TBL_SPACE 8192
+#define QLCNIC_PCI_REG_MSIX_TBL 0x44
+
+#define QLCNIC_NETDEV_WEIGHT 128
+#define QLCNIC_ADAPTER_UP_MAGIC 777
+
+#define __QLCNIC_FW_ATTACHED 0
+#define __QLCNIC_DEV_UP 1
+#define __QLCNIC_RESETTING 2
+#define __QLCNIC_START_FW 4
+
+#define QLCNIC_INTERRUPT_TEST 1
+#define QLCNIC_LOOPBACK_TEST 2
+
+struct qlcnic_adapter {
+ struct qlcnic_hardware_context ahw;
+
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct list_head mac_list;
+
+ spinlock_t tx_clean_lock;
+
+ u16 num_txd;
+ u16 num_rxd;
+ u16 num_jumbo_rxd;
+ u16 num_lro_rxd;
+
+ u8 max_rds_rings;
+ u8 max_sds_rings;
+ u8 driver_mismatch;
+ u8 msix_supported;
+ u8 rx_csum;
+ u8 pci_using_dac;
+ u8 portnum;
+ u8 physical_port;
+
+ u8 mc_enabled;
+ u8 max_mc_count;
+ u8 rss_supported;
+ u8 rsrvd1;
+ u8 fw_wait_cnt;
+ u8 fw_fail_cnt;
+ u8 tx_timeo_cnt;
+ u8 need_fw_reset;
+
+ u8 has_link_events;
+ u8 fw_type;
+ u16 tx_context_id;
+ u16 mtu;
+ u16 is_up;
+
+ u16 link_speed;
+ u16 link_duplex;
+ u16 link_autoneg;
+ u16 module_type;
+
+ u32 capabilities;
+ u32 flags;
+ u32 irq;
+ u32 temp;
+
+ u32 int_vec_bit;
+ u32 heartbit;
+
+ u8 dev_state;
+ u8 diag_test;
+ u8 diag_cnt;
+ u8 rsrd1;
+ u16 rsrd2;
+
+ u8 mac_addr[ETH_ALEN];
+
+ struct qlcnic_adapter_stats stats;
+
+ struct qlcnic_recv_context recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ void __iomem *tgt_mask_reg;
+ void __iomem *tgt_status_reg;
+ void __iomem *crb_int_state_reg;
+ void __iomem *isr_int_vec;
+
+ struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER];
+
+ struct delayed_work fw_work;
+
+ struct work_struct tx_timeout_task;
+
+ struct qlcnic_nic_intr_coalesce coal;
+
+ unsigned long state;
+ __le32 file_prd_off; /*File fw product offset*/
+ u32 fw_version;
+ const struct firmware *fw;
+};
+
+int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val);
+int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val);
+
+u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
+int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
+int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
+int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
+
+#define QLCRD32(adapter, off) \
+ (qlcnic_hw_read_wx_2M(adapter, off))
+#define QLCWR32(adapter, off, val) \
+ (qlcnic_hw_write_wx_2M(adapter, off, val))
+
+int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32);
+void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
+
+#define qlcnic_rom_lock(a) \
+ qlcnic_pcie_sem_lock((a), 2, QLCNIC_ROM_LOCK_ID)
+#define qlcnic_rom_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 2)
+#define qlcnic_phy_lock(a) \
+ qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID)
+#define qlcnic_phy_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 3)
+#define qlcnic_api_lock(a) \
+ qlcnic_pcie_sem_lock((a), 5, 0)
+#define qlcnic_api_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 5)
+#define qlcnic_sw_lock(a) \
+ qlcnic_pcie_sem_lock((a), 6, 0)
+#define qlcnic_sw_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 6)
+#define crb_win_lock(a) \
+ qlcnic_pcie_sem_lock((a), 7, QLCNIC_CRB_WIN_LOCK_ID)
+#define crb_win_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 7)
+
+int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
+int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
+int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
+
+/* Functions from qlcnic_init.c */
+int qlcnic_phantom_init(struct qlcnic_adapter *adapter);
+int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
+int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
+void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
+void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
+int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
+
+int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
+int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size);
+int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter);
+void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter);
+
+void __iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32);
+
+int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
+void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
+
+void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
+
+int qlcnic_init_firmware(struct qlcnic_adapter *adapter);
+void qlcnic_watchdog_task(struct work_struct *work);
+void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
+ struct qlcnic_host_rds_ring *rds_ring);
+int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
+void qlcnic_set_multi(struct net_device *netdev);
+void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
+int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
+int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter);
+int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable);
+int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd);
+int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable);
+void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
+
+int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
+int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
+int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable);
+int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
+void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring);
+int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac);
+void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
+int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
+
+/* Functions from qlcnic_main.c */
+int qlcnic_reset_context(struct qlcnic_adapter *);
+u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
+ u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd);
+void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
+int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
+int qlcnic_check_loopback_buff(unsigned char *data);
+netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
+
+/*
+ * QLOGIC Board information
+ */
+
+#define QLCNIC_MAX_BOARD_NAME_LEN 100
+struct qlcnic_brdinfo {
+ unsigned short vendor;
+ unsigned short device;
+ unsigned short sub_vendor;
+ unsigned short sub_device;
+ char short_name[QLCNIC_MAX_BOARD_NAME_LEN];
+};
+
+static const struct qlcnic_brdinfo qlcnic_boards[] = {
+ {0x1077, 0x8020, 0x1077, 0x203,
+ "8200 Series Single Port 10GbE Converged Network Adapter \
+ (TCP/IP Networking)"},
+ {0x1077, 0x8020, 0x1077, 0x207,
+ "8200 Series Dual Port 10GbE Converged Network Adapter \
+ (TCP/IP Networking)"},
+ {0x1077, 0x8020, 0x1077, 0x20b,
+ "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"},
+ {0x1077, 0x8020, 0x1077, 0x20c,
+ "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
+ {0x1077, 0x8020, 0x1077, 0x20f,
+ "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
+ {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
+};
+
+#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
+
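+/*
+ * Number of free descriptors left in the tx ring. The two branches cover
+ * the cases where the producer index has or has not wrapped around the
+ * ring relative to the software consumer; smp_mb() ensures up-to-date
+ * producer/consumer values are read before the space is computed.
+ */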
+static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
+{
+ smp_mb();
+ if (tx_ring->producer < tx_ring->sw_consumer)
+ return tx_ring->sw_consumer - tx_ring->producer;
+ else
+ return tx_ring->sw_consumer + tx_ring->num_desc -
+ tx_ring->producer;
+}
+
+extern const struct ethtool_ops qlcnic_ethtool_ops;
+
+#endif /* _QLCNIC_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
new file mode 100644
index 000000000000..0a6a39914aec
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -0,0 +1,534 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include "qlcnic.h"
+
+static u32
+qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
+{
+ u32 rsp;
+ int timeout = 0;
+
+ do {
+ /* give at least 1 ms for the firmware to respond */
+ msleep(1);
+
+ if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
+ return QLCNIC_CDRP_RSP_TIMEOUT;
+
+ rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
+ } while (!QLCNIC_CDRP_IS_RSP(rsp));
+
+ return rsp;
+}
+
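+/*
+ * Issue a CDRP command to the firmware: write the signature and the three
+ * arguments to their CRB registers, post the command with
+ * QLCNIC_CDRP_CMD_BIT set, then poll the CDRP register until the firmware
+ * clears that bit and leaves its response code in its place. The whole
+ * sequence is serialized by the API semaphore.
+ */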
+u32
+qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
+ u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
+{
+ u32 rsp;
+ u32 signature;
+ u32 rcode = QLCNIC_RCODE_SUCCESS;
+ struct pci_dev *pdev = adapter->pdev;
+
+ signature = QLCNIC_CDRP_SIGNATURE_MAKE(pci_fn, version);
+
+ /* Acquire semaphore before accessing CRB */
+ if (qlcnic_api_lock(adapter))
+ return QLCNIC_RCODE_TIMEOUT;
+
+ QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
+ QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, arg1);
+ QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, arg2);
+ QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, arg3);
+ QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd));
+
+ rsp = qlcnic_poll_rsp(adapter);
+
+ if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
+ dev_err(&pdev->dev, "card response timeout.\n");
+ rcode = QLCNIC_RCODE_TIMEOUT;
+ } else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
+ rcode = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+ dev_err(&pdev->dev, "failed card response code:0x%x\n",
+ rcode);
+ }
+
+ /* Release semaphore */
+ qlcnic_api_unlock(adapter);
+
+ return rcode;
+}
+
+int
+qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
+{
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
+ if (qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ recv_ctx->context_id,
+ mtu,
+ 0,
+ QLCNIC_CDRP_CMD_SET_MTU)) {
+
+ dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
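+/*
+ * Build the rx context request (a qlcnic_hostrq_rx_ctx followed by one
+ * qlcnic_hostrq_rds_ring and qlcnic_hostrq_sds_ring entry per ring) in
+ * DMA-coherent memory, hand it to the firmware with CREATE_RX_CTX, and
+ * map the producer/consumer/interrupt CRB offsets from the card response
+ * back onto the host ring structures.
+ */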
+static int
+qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ void *addr;
+ struct qlcnic_hostrq_rx_ctx *prq;
+ struct qlcnic_cardrsp_rx_ctx *prsp;
+ struct qlcnic_hostrq_rds_ring *prq_rds;
+ struct qlcnic_hostrq_sds_ring *prq_sds;
+ struct qlcnic_cardrsp_rds_ring *prsp_rds;
+ struct qlcnic_cardrsp_sds_ring *prsp_sds;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+
+ dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+ u64 phys_addr;
+
+ int i, nrds_rings, nsds_rings;
+ size_t rq_size, rsp_size;
+ u32 cap, reg, val;
+ int err;
+
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ nrds_rings = adapter->max_rds_rings;
+ nsds_rings = adapter->max_sds_rings;
+
+ rq_size =
+ SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
+ nsds_rings);
+ rsp_size =
+ SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
+ nsds_rings);
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ rq_size, &hostrq_phys_addr);
+ if (addr == NULL)
+ return -ENOMEM;
+ prq = (struct qlcnic_hostrq_rx_ctx *)addr;
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ rsp_size, &cardrsp_phys_addr);
+ if (addr == NULL) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+ prsp = (struct qlcnic_cardrsp_rx_ctx *)addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
+
+ cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN);
+ cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
+
+ prq->capabilities[0] = cpu_to_le32(cap);
+ prq->host_int_crb_mode =
+ cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
+ prq->host_rds_crb_mode =
+ cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
+
+ prq->num_rds_rings = cpu_to_le16(nrds_rings);
+ prq->num_sds_rings = cpu_to_le16(nsds_rings);
+ prq->rds_ring_offset = cpu_to_le32(0);
+
+ val = le32_to_cpu(prq->rds_ring_offset) +
+ (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
+ prq->sds_ring_offset = cpu_to_le32(val);
+
+ prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
+ le32_to_cpu(prq->rds_ring_offset));
+
+ for (i = 0; i < nrds_rings; i++) {
+
+ rds_ring = &recv_ctx->rds_rings[i];
+
+ prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
+ prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
+ prq_rds[i].ring_kind = cpu_to_le32(i);
+ prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
+ }
+
+ prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
+ le32_to_cpu(prq->sds_ring_offset));
+
+ for (i = 0; i < nsds_rings; i++) {
+
+ sds_ring = &recv_ctx->sds_rings[i];
+
+ prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
+ prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
+ prq_sds[i].msi_index = cpu_to_le16(i);
+ }
+
+ phys_addr = hostrq_phys_addr;
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ (u32)(phys_addr >> 32),
+ (u32)(phys_addr & 0xffffffff),
+ rq_size,
+ QLCNIC_CDRP_CMD_CREATE_RX_CTX);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to create rx ctx in firmware%d\n", err);
+ goto out_free_rsp;
+ }
+
+
+ prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
+ &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
+
+ for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
+ rds_ring = &recv_ctx->rds_rings[i];
+
+ reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
+ rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter,
+ QLCNIC_REG(reg - 0x200));
+ }
+
+ prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
+ &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
+
+ for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
+ sds_ring = &recv_ctx->sds_rings[i];
+
+ reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
+ sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter,
+ QLCNIC_REG(reg - 0x200));
+
+ reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
+ sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter,
+ QLCNIC_REG(reg - 0x200));
+ }
+
+ recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
+ recv_ctx->context_id = le16_to_cpu(prsp->context_id);
+ recv_ctx->virt_port = prsp->virt_port;
+
+out_free_rsp:
+ pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
+out_free_rq:
+ pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
+ return err;
+}
+
+static void
+qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ recv_ctx->context_id,
+ QLCNIC_DESTROY_CTX_RESET,
+ 0,
+ QLCNIC_CDRP_CMD_DESTROY_RX_CTX)) {
+
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy rx ctx in firmware\n");
+ }
+}
+
+static int
+qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hostrq_tx_ctx *prq;
+ struct qlcnic_hostrq_cds_ring *prq_cds;
+ struct qlcnic_cardrsp_tx_ctx *prsp;
+ void *rq_addr, *rsp_addr;
+ size_t rq_size, rsp_size;
+ u32 temp;
+ int err;
+ u64 phys_addr;
+ dma_addr_t rq_phys_addr, rsp_phys_addr;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
+ rq_addr = pci_alloc_consistent(adapter->pdev,
+ rq_size, &rq_phys_addr);
+ if (!rq_addr)
+ return -ENOMEM;
+
+ rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
+ rsp_addr = pci_alloc_consistent(adapter->pdev,
+ rsp_size, &rsp_phys_addr);
+ if (!rsp_addr) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+
+ memset(rq_addr, 0, rq_size);
+ prq = (struct qlcnic_hostrq_tx_ctx *)rq_addr;
+
+ memset(rsp_addr, 0, rsp_size);
+ prsp = (struct qlcnic_cardrsp_tx_ctx *)rsp_addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
+
+ temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
+ QLCNIC_CAP0_LSO);
+ prq->capabilities[0] = cpu_to_le32(temp);
+
+ prq->host_int_crb_mode =
+ cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
+
+ prq->interrupt_ctl = 0;
+ prq->msi_index = 0;
+ prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
+
+ prq_cds = &prq->cds_ring;
+
+ prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
+ prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
+
+ phys_addr = rq_phys_addr;
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ (u32)(phys_addr >> 32),
+ ((u32)phys_addr & 0xffffffff),
+ rq_size,
+ QLCNIC_CDRP_CMD_CREATE_TX_CTX);
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
+ tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter,
+ QLCNIC_REG(temp - 0x200));
+
+ adapter->tx_context_id =
+ le16_to_cpu(prsp->context_id);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Failed to create tx ctx in firmware%d\n", err);
+ err = -EIO;
+ }
+
+ pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
+
+out_free_rq:
+ pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
+
+ return err;
+}
+
+static void
+qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ adapter->tx_context_id,
+ QLCNIC_DESTROY_CTX_RESET,
+ 0,
+ QLCNIC_CDRP_CMD_DESTROY_TX_CTX)) {
+
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy tx ctx in firmware\n");
+ }
+}
+
+int
+qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val)
+{
+
+ if (qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ reg,
+ 0,
+ 0,
+ QLCNIC_CDRP_CMD_READ_PHY)) {
+
+ return -EIO;
+ }
+
+ *val = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+
+ return 0;
+}
+
+int
+qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val)
+{
+ return qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ reg,
+ val,
+ 0,
+ QLCNIC_CDRP_CMD_WRITE_PHY);
+}
+
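+/*
+ * Allocate the DMA-coherent descriptor rings (tx, rds and sds) and the tx
+ * hardware-consumer word, then ask the firmware to create the rx and tx
+ * contexts. On failure everything allocated so far is released through
+ * qlcnic_free_hw_resources().
+ */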
+int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
+{
+ void *addr;
+ int err;
+ int ring;
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ struct pci_dev *pdev = adapter->pdev;
+
+ recv_ctx = &adapter->recv_ctx;
+ tx_ring = adapter->tx_ring;
+
+ tx_ring->hw_consumer = (__le32 *)pci_alloc_consistent(pdev, sizeof(u32),
+ &tx_ring->hw_cons_phys_addr);
+ if (tx_ring->hw_consumer == NULL) {
+ dev_err(&pdev->dev, "failed to allocate tx consumer\n");
+ return -ENOMEM;
+ }
+ *(tx_ring->hw_consumer) = 0;
+
+ /* cmd desc ring */
+ addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
+ &tx_ring->phys_addr);
+
+ if (addr == NULL) {
+ dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
+ return -ENOMEM;
+ }
+
+ tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ addr = pci_alloc_consistent(adapter->pdev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ &rds_ring->phys_addr);
+ if (addr == NULL) {
+ dev_err(&pdev->dev,
+ "failed to allocate rds ring [%d]\n", ring);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ rds_ring->desc_head = (struct rcv_desc *)addr;
+
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ &sds_ring->phys_addr);
+ if (addr == NULL) {
+ dev_err(&pdev->dev,
+ "failed to allocate sds ring [%d]\n", ring);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ sds_ring->desc_head = (struct status_desc *)addr;
+ }
+
+
+ err = qlcnic_fw_cmd_create_rx_ctx(adapter);
+ if (err)
+ goto err_out_free;
+ err = qlcnic_fw_cmd_create_tx_ctx(adapter);
+ if (err)
+ goto err_out_free;
+
+ set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+ return 0;
+
+err_out_free:
+ qlcnic_free_hw_resources(adapter);
+ return err;
+}
+
+void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+
+ if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
+ qlcnic_fw_cmd_destroy_rx_ctx(adapter);
+ qlcnic_fw_cmd_destroy_tx_ctx(adapter);
+
+ /* Allow dma queues to drain after context reset */
+ msleep(20);
+ }
+
+ recv_ctx = &adapter->recv_ctx;
+
+ tx_ring = adapter->tx_ring;
+ if (tx_ring->hw_consumer != NULL) {
+ pci_free_consistent(adapter->pdev,
+ sizeof(u32),
+ tx_ring->hw_consumer,
+ tx_ring->hw_cons_phys_addr);
+ tx_ring->hw_consumer = NULL;
+ }
+
+ if (tx_ring->desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ TX_DESC_RINGSIZE(tx_ring),
+ tx_ring->desc_head, tx_ring->phys_addr);
+ tx_ring->desc_head = NULL;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ if (rds_ring->desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ rds_ring->desc_head,
+ rds_ring->phys_addr);
+ rds_ring->desc_head = NULL;
+ }
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ if (sds_ring->desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ sds_ring->desc_head,
+ sds_ring->phys_addr);
+ sds_ring->desc_head = NULL;
+ }
+ }
+}
+
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
new file mode 100644
index 000000000000..8da6ec8c13b9
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -0,0 +1,1015 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "qlcnic.h"
+
+struct qlcnic_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
+#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
+
+static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
+ {"xmit_called",
+ QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)},
+ {"xmit_finished",
+ QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)},
+ {"rx_dropped",
+ QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+ {"tx_dropped",
+ QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
+ {"csummed",
+ QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+ {"rx_pkts",
+ QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
+ {"lro_pkts",
+ QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
+ {"rx_bytes",
+ QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
+ {"tx_bytes",
+ QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
+};
+
+#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
+
+static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register_Test_on_offline",
+ "Link_Test_on_offline",
+ "Interrupt_Test_offline",
+ "Loopback_Test_offline"
+};
+
+#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
+
+#define QLCNIC_RING_REGS_COUNT 20
+#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32))
+#define QLCNIC_MAX_EEPROM_LEN 1024
+
+static const u32 diag_registers[] = {
+ CRB_CMDPEG_STATE,
+ CRB_RCVPEG_STATE,
+ CRB_XG_STATE_P3,
+ CRB_FW_CAPABILITIES_1,
+ ISR_INT_STATE_REG,
+ QLCNIC_CRB_DEV_REF_COUNT,
+ QLCNIC_CRB_DEV_STATE,
+ QLCNIC_CRB_DRV_STATE,
+ QLCNIC_CRB_DRV_SCRATCH,
+ QLCNIC_CRB_DEV_PARTITION_INFO,
+ QLCNIC_CRB_DRV_IDC_VER,
+ QLCNIC_PEG_ALIVE_COUNTER,
+ QLCNIC_PEG_HALT_STATUS1,
+ QLCNIC_PEG_HALT_STATUS2,
+ QLCNIC_CRB_PEG_NET_0+0x3c,
+ QLCNIC_CRB_PEG_NET_1+0x3c,
+ QLCNIC_CRB_PEG_NET_2+0x3c,
+ QLCNIC_CRB_PEG_NET_4+0x3c,
+ -1
+};
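+
+/*
+ * The table above is terminated by -1: qlcnic_get_regs() walks it until
+ * the sentinel, while qlcnic_get_regs_len() sizes the dump as the whole
+ * array plus QLCNIC_RING_REGS_LEN bytes of ring state.
+ */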
+
+static int qlcnic_get_regs_len(struct net_device *dev)
+{
+ return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN;
+}
+
+static int qlcnic_get_eeprom_len(struct net_device *dev)
+{
+ return QLCNIC_FLASH_TOTAL_SIZE;
+}
+
+static void
+qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 fw_major, fw_minor, fw_build;
+
+ fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+ sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, qlcnic_driver_name, 32);
+ strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32);
+}
+
+static int
+qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int check_sfp_module = 0;
+ u16 pcifn = adapter->ahw.pci_func;
+
+ /* read which mode */
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ ecmd->advertising = (ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full);
+
+ ecmd->speed = adapter->link_speed;
+ ecmd->duplex = adapter->link_duplex;
+ ecmd->autoneg = adapter->link_autoneg;
+
+ } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ u32 val;
+
+ val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
+ if (val == QLCNIC_PORT_MODE_802_3_AP) {
+ ecmd->supported = SUPPORTED_1000baseT_Full;
+ ecmd->advertising = ADVERTISED_1000baseT_Full;
+ } else {
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ }
+
+ if (netif_running(dev) && adapter->has_link_events) {
+ ecmd->speed = adapter->link_speed;
+ ecmd->autoneg = adapter->link_autoneg;
+ ecmd->duplex = adapter->link_duplex;
+ goto skip;
+ }
+
+ val = QLCRD32(adapter, P3_LINK_SPEED_REG(pcifn));
+ ecmd->speed = P3_LINK_SPEED_MHZ *
+ P3_LINK_SPEED_VAL(pcifn, val);
+ ecmd->duplex = DUPLEX_FULL;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ } else
+ return -EIO;
+
+skip:
+ ecmd->phy_address = adapter->physical_port;
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ switch (adapter->ahw.board_type) {
+ case QLCNIC_BRDTYPE_P3_REF_QG:
+ case QLCNIC_BRDTYPE_P3_4_GB:
+ case QLCNIC_BRDTYPE_P3_4_GB_MM:
+
+ ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ case QLCNIC_BRDTYPE_P3_10G_CX4:
+ case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
+ case QLCNIC_BRDTYPE_P3_10000_BASE_T:
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->port = PORT_TP;
+ ecmd->autoneg = adapter->link_autoneg;
+ break;
+ case QLCNIC_BRDTYPE_P3_IMEZ:
+ case QLCNIC_BRDTYPE_P3_XG_LOM:
+ case QLCNIC_BRDTYPE_P3_HMEZ:
+ ecmd->supported |= SUPPORTED_MII;
+ ecmd->advertising |= ADVERTISED_MII;
+ ecmd->port = PORT_MII;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ break;
+ case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
+ case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
+ case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->supported |= SUPPORTED_TP;
+ check_sfp_module = netif_running(dev) &&
+ adapter->has_link_events;
+ case QLCNIC_BRDTYPE_P3_10G_XFP:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ break;
+ case QLCNIC_BRDTYPE_P3_10G_TP:
+ if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
+ ecmd->advertising |=
+ (ADVERTISED_FIBRE | ADVERTISED_TP);
+ ecmd->port = PORT_FIBRE;
+ check_sfp_module = netif_running(dev) &&
+ adapter->has_link_events;
+ } else {
+ ecmd->autoneg = AUTONEG_ENABLE;
+ ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+ ecmd->advertising |=
+ (ADVERTISED_TP | ADVERTISED_Autoneg);
+ ecmd->port = PORT_TP;
+ }
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
+ adapter->ahw.board_type);
+ return -EIO;
+ }
+
+ if (check_sfp_module) {
+ switch (adapter->module_type) {
+ case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
+ case LINKEVENT_MODULE_OPTICAL_SRLR:
+ case LINKEVENT_MODULE_OPTICAL_LRM:
+ case LINKEVENT_MODULE_OPTICAL_SFP_1G:
+ ecmd->port = PORT_FIBRE;
+ break;
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
+ case LINKEVENT_MODULE_TWINAX:
+ ecmd->port = PORT_TP;
+ break;
+ default:
+ ecmd->port = PORT_OTHER;
+ }
+ }
+
+ return 0;
+}
+
+static int
+qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ __u32 status;
+
+ /* read which mode */
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ /* autonegotiation */
+ if (qlcnic_fw_cmd_set_phy(adapter,
+ QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG,
+ ecmd->autoneg) != 0)
+ return -EIO;
+ else
+ adapter->link_autoneg = ecmd->autoneg;
+
+ if (qlcnic_fw_cmd_query_phy(adapter,
+ QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+ &status) != 0)
+ return -EIO;
+
+ switch (ecmd->speed) {
+ case SPEED_10:
+ qlcnic_set_phy_speed(status, 0);
+ break;
+ case SPEED_100:
+ qlcnic_set_phy_speed(status, 1);
+ break;
+ case SPEED_1000:
+ qlcnic_set_phy_speed(status, 2);
+ break;
+ }
+
+ if (ecmd->duplex == DUPLEX_HALF)
+ qlcnic_clear_phy_duplex(status);
+ if (ecmd->duplex == DUPLEX_FULL)
+ qlcnic_set_phy_duplex(status);
+ if (qlcnic_fw_cmd_set_phy(adapter,
+ QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+ *((int *)&status)) != 0)
+ return -EIO;
+ else {
+ adapter->link_speed = ecmd->speed;
+ adapter->link_duplex = ecmd->duplex;
+ }
+ } else
+ return -EOPNOTSUPP;
+
+ if (!netif_running(dev))
+ return 0;
+
+ dev->netdev_ops->ndo_stop(dev);
+ return dev->netdev_ops->ndo_open(dev);
+}
+
+static void
+qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_host_sds_ring *sds_ring;
+ u32 *regs_buff = p;
+ int ring, i = 0;
+
+ memset(p, 0, qlcnic_get_regs_len(dev));
+ regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
+ (adapter->pdev)->device;
+
+ for (i = 0; diag_registers[i] != -1; i++)
+ regs_buff[i] = QLCRD32(adapter, diag_registers[i]);
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ regs_buff[i++] = 0xFFEFCDAB; /* Marker between regs and ring count */
+
+ regs_buff[i++] = 1; /* No. of tx ring */
+ regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
+ regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer);
+
+ regs_buff[i++] = 2; /* No. of rx ring */
+ regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer);
+ regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer);
+
+ regs_buff[i++] = adapter->max_sds_rings;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &(recv_ctx->sds_rings[ring]);
+ regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
+ }
+}
+
+static u32 qlcnic_test_link(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 val;
+
+ val = QLCRD32(adapter, CRB_XG_STATE_P3);
+ val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
+ return (val == XG_LINK_UP_P3) ? 0 : 1;
+}
+
+static int
+qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int offset;
+ int ret;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = (adapter->pdev)->vendor |
+ ((adapter->pdev)->device << 16);
+ offset = eeprom->offset;
+
+ ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
+ eeprom->len);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void
+qlcnic_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ ring->rx_pending = adapter->num_rxd;
+ ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
+ ring->rx_jumbo_pending += adapter->num_lro_rxd;
+ ring->tx_pending = adapter->num_txd;
+
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
+ ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ } else {
+ ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
+ ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ }
+
+ ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
+
+ ring->rx_mini_max_pending = 0;
+ ring->rx_mini_pending = 0;
+}
+
+static u32
+qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
+{
+ u32 num_desc;
+ num_desc = max(val, min);
+ num_desc = min(num_desc, max);
+ num_desc = roundup_pow_of_two(num_desc);
+
+ if (val != num_desc) {
+ printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n",
+ qlcnic_driver_name, r_name, num_desc, val);
+ }
+
+ return num_desc;
+}
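A rough standalone illustration of the clamp-then-round-up behaviour above (the 64/4096 limits are stand-ins for the MIN/MAX descriptor constants, and the helper mirrors the kernel's roundup_pow_of_two()):

#include <stdio.h>
#include <stdint.h>

static uint32_t roundup_pow_of_two_u32(uint32_t v)	/* stand-in for the kernel helper */
{
	uint32_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

static uint32_t validate_ringparam(uint32_t val, uint32_t lo, uint32_t hi)
{
	uint32_t n = val < lo ? lo : val;

	n = n > hi ? hi : n;
	return roundup_pow_of_two_u32(n);
}

int main(void)
{
	/* a request for 3000 rx descriptors with limits 64..4096 becomes 4096 */
	printf("%u\n", validate_ringparam(3000, 64, 4096));
	return 0;
}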
+
+static int
+qlcnic_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
+ u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ u16 num_rxd, num_jumbo_rxd, num_txd;
+
+ if (ring->rx_mini_pending)
+ return -EOPNOTSUPP;
+
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
+ max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ }
+
+ num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
+ MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
+
+ num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
+ MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
+
+ num_txd = qlcnic_validate_ringparam(ring->tx_pending,
+ MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
+
+ if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd &&
+ num_jumbo_rxd == adapter->num_jumbo_rxd)
+ return 0;
+
+ adapter->num_rxd = num_rxd;
+ adapter->num_jumbo_rxd = num_jumbo_rxd;
+ adapter->num_txd = num_txd;
+
+ return qlcnic_reset_context(adapter);
+}
+
+static void
+qlcnic_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int port = adapter->physical_port;
+ __u32 val;
+
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ if ((port < 0) || (port >= QLCNIC_NIU_MAX_GBE_PORTS))
+ return;
+ /* get flow control settings */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+ pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+ switch (port) {
+ case 0:
+ pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
+ break;
+ case 1:
+ pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val));
+ break;
+ case 2:
+ pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val));
+ break;
+ case 3:
+ default:
+ pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val));
+ break;
+ }
+ } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ if ((port < 0) || (port >= QLCNIC_NIU_MAX_XG_PORTS))
+ return;
+ pause->rx_pause = 1;
+ val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+ if (port == 0)
+ pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
+ else
+ pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val));
+ } else {
+ dev_err(&netdev->dev, "Unknown board type: %x\n",
+ adapter->ahw.port_type);
+ }
+}
+
+static int
+qlcnic_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int port = adapter->physical_port;
+ __u32 val;
+
+ /* configure flow control according to port type */
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ if ((port < 0) || (port >= QLCNIC_NIU_MAX_GBE_PORTS))
+ return -EIO;
+ /* set flow control */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+
+ if (pause->rx_pause)
+ qlcnic_gb_rx_flowctl(val);
+ else
+ qlcnic_gb_unset_rx_flowctl(val);
+
+ QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port),
+ val);
+ /* set tx pause frame generation */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+ switch (port) {
+ case 0:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb0_mask(val);
+ else
+ qlcnic_gb_set_gb0_mask(val);
+ break;
+ case 1:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb1_mask(val);
+ else
+ qlcnic_gb_set_gb1_mask(val);
+ break;
+ case 2:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb2_mask(val);
+ else
+ qlcnic_gb_set_gb2_mask(val);
+ break;
+ case 3:
+ default:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb3_mask(val);
+ else
+ qlcnic_gb_set_gb3_mask(val);
+ break;
+ }
+ QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
+ } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ if ((port < 0) || (port >= QLCNIC_NIU_MAX_XG_PORTS))
+ return -EIO;
+ val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+ if (port == 0) {
+ if (pause->tx_pause)
+ qlcnic_xg_unset_xg0_mask(val);
+ else
+ qlcnic_xg_set_xg0_mask(val);
+ } else {
+ if (pause->tx_pause)
+ qlcnic_xg_unset_xg1_mask(val);
+ else
+ qlcnic_xg_set_xg1_mask(val);
+ }
+ QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val);
+ } else {
+ dev_err(&netdev->dev, "Unknown board type: %x\n",
+ adapter->ahw.port_type);
+ }
+ return 0;
+}
+
+static int qlcnic_reg_test(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 data_read, data_written;
+
+ data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
+ if ((data_read & 0xffff) != adapter->pdev->vendor)
+ return 1;
+
+ data_written = (u32)0xa5a5a5a5;
+
+ QLCWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
+ data_read = QLCRD32(adapter, CRB_SCRATCHPAD_TEST);
+ if (data_written != data_read)
+ return 1;
+
+ return 0;
+}
+
+static int qlcnic_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return QLCNIC_TEST_LEN;
+ case ETH_SS_STATS:
+ return QLCNIC_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+#define QLC_ILB_PKT_SIZE 64
+
+static void qlcnic_create_loopback_buff(unsigned char *data)
+{
+ unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
+ memset(data, 0x4e, QLC_ILB_PKT_SIZE);
+ memset(data, 0xff, 12);
+ memcpy(data + 12, random_data, sizeof(random_data));
+}
+
+int qlcnic_check_loopback_buff(unsigned char *data)
+{
+ unsigned char buff[QLC_ILB_PKT_SIZE];
+ qlcnic_create_loopback_buff(buff);
+ return memcmp(data, buff, QLC_ILB_PKT_SIZE);
+}
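The helpers above build and verify a fixed 64-byte pattern; this standalone sketch reproduces the layout (bytes 0-11 are 0xff, bytes 12-15 carry the a8 06 45 00 marker, the rest is 0x4e filler):

#include <stdio.h>
#include <string.h>

#define PKT_SIZE 64

int main(void)
{
	unsigned char marker[] = { 0xa8, 0x06, 0x45, 0x00 };
	unsigned char data[PKT_SIZE];
	int i;

	memset(data, 0x4e, PKT_SIZE);		/* filler */
	memset(data, 0xff, 12);			/* dst/src MAC bytes */
	memcpy(data + 12, marker, sizeof(marker));

	for (i = 0; i < PKT_SIZE; i++)
		printf("%02x%c", data[i], (i % 16 == 15) ? '\n' : ' ');
	return 0;
}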
+
+static int qlcnic_do_ilb_test(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ skb = dev_alloc_skb(QLC_ILB_PKT_SIZE);
+ if (!skb)
+ return -ENOMEM;
+ qlcnic_create_loopback_buff(skb->data);
+ skb_put(skb, QLC_ILB_PKT_SIZE);
+
+ adapter->diag_cnt = 0;
+
+ qlcnic_xmit_frame(skb, adapter->netdev);
+
+ msleep(5);
+
+ qlcnic_process_rcv_ring_diag(sds_ring);
+
+ dev_kfree_skb_any(skb);
+ if (!adapter->diag_cnt)
+ return -1;
+ }
+ return 0;
+}
+
+static int qlcnic_loopback_test(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int max_sds_rings = adapter->max_sds_rings;
+ int ret;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
+ if (ret)
+ goto clear_it;
+
+ ret = qlcnic_set_ilb_mode(adapter);
+ if (ret)
+ goto done;
+
+ ret = qlcnic_do_ilb_test(adapter);
+
+ qlcnic_clear_ilb_mode(adapter);
+
+done:
+ qlcnic_diag_free_res(netdev, max_sds_rings);
+
+clear_it:
+ adapter->max_sds_rings = max_sds_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+static int qlcnic_irq_test(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int max_sds_rings = adapter->max_sds_rings;
+ int ret;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
+ if (ret)
+ goto clear_it;
+
+ adapter->diag_cnt = 0;
+ ret = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func,
+ QLCHAL_VERSION, adapter->portnum, 0, 0, 0x00000011);
+ if (ret)
+ goto done;
+
+ msleep(10);
+
+ ret = !adapter->diag_cnt;
+
+done:
+ qlcnic_diag_free_res(netdev, max_sds_rings);
+
+clear_it:
+ adapter->max_sds_rings = max_sds_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+static void
+qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
+ u64 *data)
+{
+ memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN);
+
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ data[2] = qlcnic_irq_test(dev);
+ if (data[2])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ data[3] = qlcnic_loopback_test(dev);
+ if (data[3])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ }
+
+ data[0] = qlcnic_reg_test(dev);
+ if (data[0])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* link test */
+ data[1] = (u64) qlcnic_test_link(dev);
+ if (data[1])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+}
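For reference, the ordering above fixes the meaning of the four results: data[0] is the register test, data[1] the link test, and data[2]/data[3] the interrupt and internal-loopback tests, which only run when an offline self-test is requested (for example with ethtool -t ethX offline; the interface name is illustrative).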
+
+static void
+qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int index;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *qlcnic_gstrings_test,
+ QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (index = 0; index < QLCNIC_STATS_LEN; index++) {
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_gstrings_stats[index].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ }
+}
+
+static void
+qlcnic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int index;
+
+ for (index = 0; index < QLCNIC_STATS_LEN; index++) {
+ char *p =
+ (char *)adapter +
+ qlcnic_gstrings_stats[index].stat_offset;
+ data[index] =
+ (qlcnic_gstrings_stats[index].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+}
+
+static u32 qlcnic_get_rx_csum(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ return adapter->rx_csum;
+}
+
+static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ adapter->rx_csum = !!data;
+ return 0;
+}
+
+static u32 qlcnic_get_tso(struct net_device *dev)
+{
+ return (dev->features & (NETIF_F_TSO | NETIF_F_TSO6)) != 0;
+}
+
+static int qlcnic_set_tso(struct net_device *dev, u32 data)
+{
+ if (data)
+ dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ else
+ dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+ return 0;
+}
+
+static int qlcnic_blink_led(struct net_device *dev, u32 val)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int ret;
+
+ ret = qlcnic_config_led(adapter, 1, 0xf);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to set LED blink state.\n");
+ return ret;
+ }
+
+ msleep_interruptible(val * 1000);
+
+ ret = qlcnic_config_led(adapter, 0, 0xf);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to reset LED blink state.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 wol_cfg;
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ if (wol_cfg & (1UL << adapter->portnum))
+ wol->supported |= WAKE_MAGIC;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ if (wol_cfg & (1UL << adapter->portnum))
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int
+qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 wol_cfg;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EOPNOTSUPP;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ if (!(wol_cfg & (1 << adapter->portnum)))
+ return -EOPNOTSUPP;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ if (wol->wolopts & WAKE_MAGIC)
+ wol_cfg |= 1UL << adapter->portnum;
+ else
+ wol_cfg &= ~(1UL << adapter->portnum);
+
+ QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg);
+
+ return 0;
+}
+
+/*
+ * Set the coalescing parameters. Currently only normal is supported.
+ * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the
+ * firmware coalescing to default.
+ */
+static int qlcnic_set_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EINVAL;
+
+ /*
+ * Return Error if unsupported values or
+ * unsupported parameters are set.
+ */
+ if (ethcoal->rx_coalesce_usecs > 0xffff ||
+ ethcoal->rx_max_coalesced_frames > 0xffff ||
+ ethcoal->tx_coalesce_usecs > 0xffff ||
+ ethcoal->tx_max_coalesced_frames > 0xffff ||
+ ethcoal->rx_coalesce_usecs_irq ||
+ ethcoal->rx_max_coalesced_frames_irq ||
+ ethcoal->tx_coalesce_usecs_irq ||
+ ethcoal->tx_max_coalesced_frames_irq ||
+ ethcoal->stats_block_coalesce_usecs ||
+ ethcoal->use_adaptive_rx_coalesce ||
+ ethcoal->use_adaptive_tx_coalesce ||
+ ethcoal->pkt_rate_low ||
+ ethcoal->rx_coalesce_usecs_low ||
+ ethcoal->rx_max_coalesced_frames_low ||
+ ethcoal->tx_coalesce_usecs_low ||
+ ethcoal->tx_max_coalesced_frames_low ||
+ ethcoal->pkt_rate_high ||
+ ethcoal->rx_coalesce_usecs_high ||
+ ethcoal->rx_max_coalesced_frames_high ||
+ ethcoal->tx_coalesce_usecs_high ||
+ ethcoal->tx_max_coalesced_frames_high)
+ return -EINVAL;
+
+ if (!ethcoal->rx_coalesce_usecs ||
+ !ethcoal->rx_max_coalesced_frames) {
+ adapter->coal.flags = QLCNIC_INTR_DEFAULT;
+ adapter->coal.normal.data.rx_time_us =
+ QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
+ adapter->coal.normal.data.rx_packets =
+ QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ } else {
+ adapter->coal.flags = 0;
+ adapter->coal.normal.data.rx_time_us =
+ ethcoal->rx_coalesce_usecs;
+ adapter->coal.normal.data.rx_packets =
+ ethcoal->rx_max_coalesced_frames;
+ }
+ adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs;
+ adapter->coal.normal.data.tx_packets =
+ ethcoal->tx_max_coalesced_frames;
+
+ qlcnic_config_intr_coalesce(adapter);
+
+ return 0;
+}
+
+static int qlcnic_get_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EINVAL;
+
+ ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us;
+ ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us;
+ ethcoal->rx_max_coalesced_frames =
+ adapter->coal.normal.data.rx_packets;
+ ethcoal->tx_max_coalesced_frames =
+ adapter->coal.normal.data.tx_packets;
+
+ return 0;
+}
+
+static int qlcnic_set_flags(struct net_device *netdev, u32 data)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int hw_lro;
+
+ if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
+ return -EINVAL;
+
+ ethtool_op_set_flags(netdev, data);
+
+ hw_lro = (data & ETH_FLAG_LRO) ? QLCNIC_LRO_ENABLED : 0;
+
+ if (qlcnic_config_hw_lro(adapter, hw_lro))
+ return -EIO;
+
+ if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
+ return -EIO;
+
+ return 0;
+}
+
+const struct ethtool_ops qlcnic_ethtool_ops = {
+ .get_settings = qlcnic_get_settings,
+ .set_settings = qlcnic_set_settings,
+ .get_drvinfo = qlcnic_get_drvinfo,
+ .get_regs_len = qlcnic_get_regs_len,
+ .get_regs = qlcnic_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = qlcnic_get_eeprom_len,
+ .get_eeprom = qlcnic_get_eeprom,
+ .get_ringparam = qlcnic_get_ringparam,
+ .set_ringparam = qlcnic_set_ringparam,
+ .get_pauseparam = qlcnic_get_pauseparam,
+ .set_pauseparam = qlcnic_set_pauseparam,
+ .set_tx_csum = ethtool_op_set_tx_csum,
+ .set_sg = ethtool_op_set_sg,
+ .get_tso = qlcnic_get_tso,
+ .set_tso = qlcnic_set_tso,
+ .get_wol = qlcnic_get_wol,
+ .set_wol = qlcnic_set_wol,
+ .self_test = qlcnic_diag_test,
+ .get_strings = qlcnic_get_strings,
+ .get_ethtool_stats = qlcnic_get_ethtool_stats,
+ .get_sset_count = qlcnic_get_sset_count,
+ .get_rx_csum = qlcnic_get_rx_csum,
+ .set_rx_csum = qlcnic_set_rx_csum,
+ .get_coalesce = qlcnic_get_intr_coalesce,
+ .set_coalesce = qlcnic_set_intr_coalesce,
+ .get_flags = ethtool_op_get_flags,
+ .set_flags = qlcnic_set_flags,
+ .phys_id = qlcnic_blink_led,
+};
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
new file mode 100644
index 000000000000..0469f84360a4
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -0,0 +1,937 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#ifndef __QLCNIC_HDR_H_
+#define __QLCNIC_HDR_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/*
+ * The basic unit of access when reading/writing control registers.
+ */
+
+enum {
+ QLCNIC_HW_H0_CH_HUB_ADR = 0x05,
+ QLCNIC_HW_H1_CH_HUB_ADR = 0x0E,
+ QLCNIC_HW_H2_CH_HUB_ADR = 0x03,
+ QLCNIC_HW_H3_CH_HUB_ADR = 0x01,
+ QLCNIC_HW_H4_CH_HUB_ADR = 0x06,
+ QLCNIC_HW_H5_CH_HUB_ADR = 0x07,
+ QLCNIC_HW_H6_CH_HUB_ADR = 0x08
+};
+
+/* Hub 0 */
+enum {
+ QLCNIC_HW_MN_CRB_AGT_ADR = 0x15,
+ QLCNIC_HW_MS_CRB_AGT_ADR = 0x25
+};
+
+/* Hub 1 */
+enum {
+ QLCNIC_HW_PS_CRB_AGT_ADR = 0x73,
+ QLCNIC_HW_SS_CRB_AGT_ADR = 0x20,
+ QLCNIC_HW_RPMX3_CRB_AGT_ADR = 0x0b,
+ QLCNIC_HW_QMS_CRB_AGT_ADR = 0x00,
+ QLCNIC_HW_SQGS0_CRB_AGT_ADR = 0x01,
+ QLCNIC_HW_SQGS1_CRB_AGT_ADR = 0x02,
+ QLCNIC_HW_SQGS2_CRB_AGT_ADR = 0x03,
+ QLCNIC_HW_SQGS3_CRB_AGT_ADR = 0x04,
+ QLCNIC_HW_C2C0_CRB_AGT_ADR = 0x58,
+ QLCNIC_HW_C2C1_CRB_AGT_ADR = 0x59,
+ QLCNIC_HW_C2C2_CRB_AGT_ADR = 0x5a,
+ QLCNIC_HW_RPMX2_CRB_AGT_ADR = 0x0a,
+ QLCNIC_HW_RPMX4_CRB_AGT_ADR = 0x0c,
+ QLCNIC_HW_RPMX7_CRB_AGT_ADR = 0x0f,
+ QLCNIC_HW_RPMX9_CRB_AGT_ADR = 0x12,
+ QLCNIC_HW_SMB_CRB_AGT_ADR = 0x18
+};
+
+/* Hub 2 */
+enum {
+ QLCNIC_HW_NIU_CRB_AGT_ADR = 0x31,
+ QLCNIC_HW_I2C0_CRB_AGT_ADR = 0x19,
+ QLCNIC_HW_I2C1_CRB_AGT_ADR = 0x29,
+
+ QLCNIC_HW_SN_CRB_AGT_ADR = 0x10,
+ QLCNIC_HW_I2Q_CRB_AGT_ADR = 0x20,
+ QLCNIC_HW_LPC_CRB_AGT_ADR = 0x22,
+ QLCNIC_HW_ROMUSB_CRB_AGT_ADR = 0x21,
+ QLCNIC_HW_QM_CRB_AGT_ADR = 0x66,
+ QLCNIC_HW_SQG0_CRB_AGT_ADR = 0x60,
+ QLCNIC_HW_SQG1_CRB_AGT_ADR = 0x61,
+ QLCNIC_HW_SQG2_CRB_AGT_ADR = 0x62,
+ QLCNIC_HW_SQG3_CRB_AGT_ADR = 0x63,
+ QLCNIC_HW_RPMX1_CRB_AGT_ADR = 0x09,
+ QLCNIC_HW_RPMX5_CRB_AGT_ADR = 0x0d,
+ QLCNIC_HW_RPMX6_CRB_AGT_ADR = 0x0e,
+ QLCNIC_HW_RPMX8_CRB_AGT_ADR = 0x11
+};
+
+/* Hub 3 */
+enum {
+ QLCNIC_HW_PH_CRB_AGT_ADR = 0x1A,
+ QLCNIC_HW_SRE_CRB_AGT_ADR = 0x50,
+ QLCNIC_HW_EG_CRB_AGT_ADR = 0x51,
+ QLCNIC_HW_RPMX0_CRB_AGT_ADR = 0x08
+};
+
+/* Hub 4 */
+enum {
+ QLCNIC_HW_PEGN0_CRB_AGT_ADR = 0x40,
+ QLCNIC_HW_PEGN1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGNI_CRB_AGT_ADR,
+ QLCNIC_HW_PEGND_CRB_AGT_ADR,
+ QLCNIC_HW_PEGNC_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR0_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN4_CRB_AGT_ADR
+};
+
+/* Hub 5 */
+enum {
+ QLCNIC_HW_PEGS0_CRB_AGT_ADR = 0x40,
+ QLCNIC_HW_PEGS1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGS2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGS3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSI_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSD_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSC_CRB_AGT_ADR
+};
+
+/* Hub 6 */
+enum {
+ QLCNIC_HW_CAS0_CRB_AGT_ADR = 0x46,
+ QLCNIC_HW_CAS1_CRB_AGT_ADR = 0x47,
+ QLCNIC_HW_CAS2_CRB_AGT_ADR = 0x48,
+ QLCNIC_HW_CAS3_CRB_AGT_ADR = 0x49,
+ QLCNIC_HW_NCM_CRB_AGT_ADR = 0x16,
+ QLCNIC_HW_TMR_CRB_AGT_ADR = 0x17,
+ QLCNIC_HW_XDMA_CRB_AGT_ADR = 0x05,
+ QLCNIC_HW_OCM0_CRB_AGT_ADR = 0x06,
+ QLCNIC_HW_OCM1_CRB_AGT_ADR = 0x07
+};
+
+/* Floaters - non existent modules */
+#define QLCNIC_HW_EFC_RPMX0_CRB_AGT_ADR 0x67
+
+/* This field defines PCI/X adr [25:20] of agents on the CRB */
+enum {
+ QLCNIC_HW_PX_MAP_CRB_PH = 0,
+ QLCNIC_HW_PX_MAP_CRB_PS,
+ QLCNIC_HW_PX_MAP_CRB_MN,
+ QLCNIC_HW_PX_MAP_CRB_MS,
+ QLCNIC_HW_PX_MAP_CRB_PGR1,
+ QLCNIC_HW_PX_MAP_CRB_SRE,
+ QLCNIC_HW_PX_MAP_CRB_NIU,
+ QLCNIC_HW_PX_MAP_CRB_QMN,
+ QLCNIC_HW_PX_MAP_CRB_SQN0,
+ QLCNIC_HW_PX_MAP_CRB_SQN1,
+ QLCNIC_HW_PX_MAP_CRB_SQN2,
+ QLCNIC_HW_PX_MAP_CRB_SQN3,
+ QLCNIC_HW_PX_MAP_CRB_QMS,
+ QLCNIC_HW_PX_MAP_CRB_SQS0,
+ QLCNIC_HW_PX_MAP_CRB_SQS1,
+ QLCNIC_HW_PX_MAP_CRB_SQS2,
+ QLCNIC_HW_PX_MAP_CRB_SQS3,
+ QLCNIC_HW_PX_MAP_CRB_PGN0,
+ QLCNIC_HW_PX_MAP_CRB_PGN1,
+ QLCNIC_HW_PX_MAP_CRB_PGN2,
+ QLCNIC_HW_PX_MAP_CRB_PGN3,
+ QLCNIC_HW_PX_MAP_CRB_PGND,
+ QLCNIC_HW_PX_MAP_CRB_PGNI,
+ QLCNIC_HW_PX_MAP_CRB_PGS0,
+ QLCNIC_HW_PX_MAP_CRB_PGS1,
+ QLCNIC_HW_PX_MAP_CRB_PGS2,
+ QLCNIC_HW_PX_MAP_CRB_PGS3,
+ QLCNIC_HW_PX_MAP_CRB_PGSD,
+ QLCNIC_HW_PX_MAP_CRB_PGSI,
+ QLCNIC_HW_PX_MAP_CRB_SN,
+ QLCNIC_HW_PX_MAP_CRB_PGR2,
+ QLCNIC_HW_PX_MAP_CRB_EG,
+ QLCNIC_HW_PX_MAP_CRB_PH2,
+ QLCNIC_HW_PX_MAP_CRB_PS2,
+ QLCNIC_HW_PX_MAP_CRB_CAM,
+ QLCNIC_HW_PX_MAP_CRB_CAS0,
+ QLCNIC_HW_PX_MAP_CRB_CAS1,
+ QLCNIC_HW_PX_MAP_CRB_CAS2,
+ QLCNIC_HW_PX_MAP_CRB_C2C0,
+ QLCNIC_HW_PX_MAP_CRB_C2C1,
+ QLCNIC_HW_PX_MAP_CRB_TIMR,
+ QLCNIC_HW_PX_MAP_CRB_PGR3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX1,
+ QLCNIC_HW_PX_MAP_CRB_RPMX2,
+ QLCNIC_HW_PX_MAP_CRB_RPMX3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX4,
+ QLCNIC_HW_PX_MAP_CRB_RPMX5,
+ QLCNIC_HW_PX_MAP_CRB_RPMX6,
+ QLCNIC_HW_PX_MAP_CRB_RPMX7,
+ QLCNIC_HW_PX_MAP_CRB_XDMA,
+ QLCNIC_HW_PX_MAP_CRB_I2Q,
+ QLCNIC_HW_PX_MAP_CRB_ROMUSB,
+ QLCNIC_HW_PX_MAP_CRB_CAS3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX0,
+ QLCNIC_HW_PX_MAP_CRB_RPMX8,
+ QLCNIC_HW_PX_MAP_CRB_RPMX9,
+ QLCNIC_HW_PX_MAP_CRB_OCM0,
+ QLCNIC_HW_PX_MAP_CRB_OCM1,
+ QLCNIC_HW_PX_MAP_CRB_SMB,
+ QLCNIC_HW_PX_MAP_CRB_I2C0,
+ QLCNIC_HW_PX_MAP_CRB_I2C1,
+ QLCNIC_HW_PX_MAP_CRB_LPC,
+ QLCNIC_HW_PX_MAP_CRB_PGNC,
+ QLCNIC_HW_PX_MAP_CRB_PGR0
+};
+
+/* This field defines CRB adr [31:20] of the agents */
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PH \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_PH_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_MS \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MS_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_PS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_QMS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS0 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS1 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS2 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS3 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C0 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C1 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX4_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX7_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX9_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SMB \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SMB_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_NIU \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0 \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1 \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C1_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SRE \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SRE_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_EG \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_EG_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMN \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_QM_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX5_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX6_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX8_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS2 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS3 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS3_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNI_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGND \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGND_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN4_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNC_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR0 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR1 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR2 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR3 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR3_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSI_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSD \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSD_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSC \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSC_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAM \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_NCM_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_TMR_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_XDMA_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SN \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_SN_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_I2Q_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_ROMUSB_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0 \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM1 \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_LPC \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_LPC_CRB_AGT_ADR)
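Each selector above packs the hub number into bits [11:7] and the agent number into bits [6:0]; a standalone sketch of the composition, reusing the NIU values from the enums above:

#include <stdio.h>

#define H2_CH_HUB_ADR	0x03	/* QLCNIC_HW_H2_CH_HUB_ADR */
#define NIU_CRB_AGT_ADR	0x31	/* QLCNIC_HW_NIU_CRB_AGT_ADR */

int main(void)
{
	unsigned int adr_niu = (H2_CH_HUB_ADR << 7) | NIU_CRB_AGT_ADR;

	printf("ADR_NIU = 0x%03x\n", adr_niu);	/* prints 0x1b1 */
	return 0;
}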
+
+#define QLCNIC_SRE_MISC (QLCNIC_CRB_SRE + 0x0002c)
+
+#define QLCNIC_I2Q_CLR_PCI_HI (QLCNIC_CRB_I2Q + 0x00034)
+
+#define ROMUSB_GLB (QLCNIC_CRB_ROMUSB + 0x00000)
+#define ROMUSB_ROM (QLCNIC_CRB_ROMUSB + 0x10000)
+
+#define QLCNIC_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
+#define QLCNIC_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
+#define QLCNIC_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c)
+#define QLCNIC_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
+#define QLCNIC_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044)
+#define QLCNIC_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
+#define QLCNIC_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8)
+
+#define QLCNIC_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n)))
+
+#define QLCNIC_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
+#define QLCNIC_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
+#define QLCNIC_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
+#define QLCNIC_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
+#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
+#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
+
+/* Lock IDs for ROM lock */
+#define ROM_LOCK_DRIVER 0x0d417340
+
+/******************************************************************************
+*
+* Definitions specific to M25P flash
+*
+*******************************************************************************
+*/
+
+/* all are 1MB windows */
+
+#define QLCNIC_PCI_CRB_WINDOWSIZE 0x00100000
+#define QLCNIC_PCI_CRB_WINDOW(A) \
+ (QLCNIC_PCI_CRBSPACE + (A)*QLCNIC_PCI_CRB_WINDOWSIZE)
+
+#define QLCNIC_CRB_NIU QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU)
+#define QLCNIC_CRB_SRE QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE)
+#define QLCNIC_CRB_ROMUSB \
+ QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB)
+#define QLCNIC_CRB_I2Q QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q)
+#define QLCNIC_CRB_I2C0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0)
+#define QLCNIC_CRB_SMB QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB)
+#define QLCNIC_CRB_MAX QLCNIC_PCI_CRB_WINDOW(64)
+
+#define QLCNIC_CRB_PCIX_HOST QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH)
+#define QLCNIC_CRB_PCIX_HOST2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH2)
+#define QLCNIC_CRB_PEG_NET_0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN0)
+#define QLCNIC_CRB_PEG_NET_1 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN1)
+#define QLCNIC_CRB_PEG_NET_2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN2)
+#define QLCNIC_CRB_PEG_NET_3 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN3)
+#define QLCNIC_CRB_PEG_NET_4 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SQS2)
+#define QLCNIC_CRB_PEG_NET_D QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGND)
+#define QLCNIC_CRB_PEG_NET_I QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGNI)
+#define QLCNIC_CRB_DDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_MN)
+#define QLCNIC_CRB_QDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SN)
+
+#define QLCNIC_CRB_PCIX_MD QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PS)
+#define QLCNIC_CRB_PCIE QLCNIC_CRB_PCIX_MD
+
+#define ISR_INT_VECTOR (QLCNIC_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_MASK_SLOW (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_TARGET_STATUS (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_MASK (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_STATUS_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_MASK_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_STATUS_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_MASK_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_STATUS_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_MASK_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_STATUS_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_MASK_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_STATUS_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_MASK_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_STATUS_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_MASK_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define QLCNIC_PCI_MN_2M (0)
+#define QLCNIC_PCI_MS_2M (0x80000)
+#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
+#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
+#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
+#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
+#define QLCNIC_PCI_CAMQM_2M_END (0x04800800UL)
+
+#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
+
+#define QLCNIC_ADDR_DDR_NET (0x0000000000000000ULL)
+#define QLCNIC_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+#define QLCNIC_ADDR_OCM0 (0x0000000200000000ULL)
+#define QLCNIC_ADDR_OCM0_MAX (0x00000002000fffffULL)
+#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL)
+#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL)
+#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL)
+#define QLCNIC_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL)
+
+/*
+ * Register offsets for MN
+ */
+#define QLCNIC_MIU_CONTROL (0x000)
+#define QLCNIC_MIU_MN_CONTROL (QLCNIC_CRB_DDR_NET+QLCNIC_MIU_CONTROL)
+
+/* 200ms delay in each loop */
+#define QLCNIC_NIU_PHY_WAITLEN 200000
+/* 10 seconds before we give up */
+#define QLCNIC_NIU_PHY_WAITMAX 50
+#define QLCNIC_NIU_MAX_GBE_PORTS 4
+#define QLCNIC_NIU_MAX_XG_PORTS 2
+
+#define QLCNIC_NIU_MODE (QLCNIC_CRB_NIU + 0x00000)
+#define QLCNIC_NIU_GB_PAUSE_CTL (QLCNIC_CRB_NIU + 0x0030c)
+#define QLCNIC_NIU_XG_PAUSE_CTL (QLCNIC_CRB_NIU + 0x00098)
+
+#define QLCNIC_NIU_GB_MAC_CONFIG_0(I) \
+ (QLCNIC_CRB_NIU + 0x30000 + (I)*0x10000)
+#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \
+ (QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000)
+
+
+#define TEST_AGT_CTRL (0x00)
+
+#define TA_CTL_START 1
+#define TA_CTL_ENABLE 2
+#define TA_CTL_WRITE 4
+#define TA_CTL_BUSY 8
+
+/*
+ * Register offsets for MN
+ */
+#define MIU_TEST_AGT_BASE (0x90)
+
+#define MIU_TEST_AGT_ADDR_LO (0x04)
+#define MIU_TEST_AGT_ADDR_HI (0x08)
+#define MIU_TEST_AGT_WRDATA_LO (0x10)
+#define MIU_TEST_AGT_WRDATA_HI (0x14)
+#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
+#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
+#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
+#define MIU_TEST_AGT_RDDATA_LO (0x18)
+#define MIU_TEST_AGT_RDDATA_HI (0x1c)
+#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
+#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
+#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
+
+#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
+#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
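The WRDATA(i)/RDDATA(i) index macros simply step through the four explicit offsets (LO, HI, UPPER_LO, UPPER_HI); a standalone check of the arithmetic:

#include <stdio.h>

#define WRDATA(i) (0x10 + (0x10 * ((i) >> 1)) + (4 * ((i) & 1)))
#define RDDATA(i) (0x18 + (0x10 * ((i) >> 1)) + (4 * ((i) & 1)))

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("i=%d wr=0x%02x rd=0x%02x\n", i, WRDATA(i), RDDATA(i));
	/* prints 0x10/0x18, 0x14/0x1c, 0x20/0x28, 0x24/0x2c */
	return 0;
}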
+
+/*
+ * Register offsets for MS
+ */
+#define SIU_TEST_AGT_BASE (0x60)
+
+#define SIU_TEST_AGT_ADDR_LO (0x04)
+#define SIU_TEST_AGT_ADDR_HI (0x18)
+#define SIU_TEST_AGT_WRDATA_LO (0x08)
+#define SIU_TEST_AGT_WRDATA_HI (0x0c)
+#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i)))
+#define SIU_TEST_AGT_RDDATA_LO (0x10)
+#define SIU_TEST_AGT_RDDATA_HI (0x14)
+#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i)))
+
+#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
+#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
+
+/* XG Link status */
+#define XG_LINK_UP 0x10
+#define XG_LINK_DOWN 0x20
+
+#define XG_LINK_UP_P3 0x01
+#define XG_LINK_DOWN_P3 0x02
+#define XG_LINK_STATE_P3_MASK 0xf
+#define XG_LINK_STATE_P3(pcifn, val) \
+ (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK)
+
+#define P3_LINK_SPEED_MHZ 100
+#define P3_LINK_SPEED_MASK 0xff
+#define P3_LINK_SPEED_REG(pcifn) \
+ (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
+#define P3_LINK_SPEED_VAL(pcifn, reg) \
+ (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK)
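A standalone sketch of how a per-function link state and speed would be unpacked with the macros above (4 state bits per function, one speed byte per function, here assumed to be scaled by P3_LINK_SPEED_MHZ, i.e. 100, to give Mb/s; the register values are invented for the example):

#include <stdio.h>

#define LINK_STATE(pcifn, val)	(((val) >> ((pcifn) * 4)) & 0xf)
#define LINK_SPEED(pcifn, reg)	(((reg) >> (8 * ((pcifn) & 0x3))) & 0xff)

int main(void)
{
	unsigned int state_reg = 0x00000121;	/* example CRB_XG_STATE_P3 contents */
	unsigned int speed_reg = 0x00640000;	/* example CRB_PF_LINK_SPEED_1 contents */
	int pcifn = 2;

	printf("func %d: link %s, speed %u Mb/s\n", pcifn,
	       LINK_STATE(pcifn, state_reg) == 0x01 ? "up" : "down",
	       LINK_SPEED(pcifn, speed_reg) * 100);
	/* prints: func 2: link up, speed 10000 Mb/s */
	return 0;
}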
+
+#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000)
+#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg))
+#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150))
+#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154))
+#define QLCNIC_FW_VERSION_SUB (QLCNIC_CAM_RAM(0x158))
+#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100))
+#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120))
+#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124))
+
+#define NIC_CRB_BASE (QLCNIC_CAM_RAM(0x200))
+#define NIC_CRB_BASE_2 (QLCNIC_CAM_RAM(0x700))
+#define QLCNIC_REG(X) (NIC_CRB_BASE+(X))
+#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X))
+
+#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18))
+#define QLCNIC_ARG1_CRB_OFFSET (QLCNIC_REG(0x1c))
+#define QLCNIC_ARG2_CRB_OFFSET (QLCNIC_REG(0x20))
+#define QLCNIC_ARG3_CRB_OFFSET (QLCNIC_REG(0x24))
+#define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28))
+
+#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50))
+#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c))
+
+#define CRB_XG_STATE_P3 (QLCNIC_REG(0x98))
+#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
+#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
+
+#define CRB_MPORT_MODE (QLCNIC_REG(0xc4))
+#define CRB_DMA_SHIFT (QLCNIC_REG(0xcc))
+
+#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4))
+
+#define CRB_V2P_0 (QLCNIC_REG(0x290))
+#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
+#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
+
+#define CRB_SW_INT_MASK_0 (QLCNIC_REG(0x1d8))
+#define CRB_SW_INT_MASK_1 (QLCNIC_REG(0x1e0))
+#define CRB_SW_INT_MASK_2 (QLCNIC_REG(0x1e4))
+#define CRB_SW_INT_MASK_3 (QLCNIC_REG(0x1e8))
+
+#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
+#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
+
+/*
+ * capabilities register, can be used to selectively enable/disable features
+ * for backward compatibility
+ */
+#define CRB_NIC_CAPABILITIES_HOST QLCNIC_REG(0x1a8)
+#define CRB_NIC_CAPABILITIES_FW QLCNIC_REG(0x1dc)
+#define CRB_NIC_MSI_MODE_HOST QLCNIC_REG(0x270)
+#define CRB_NIC_MSI_MODE_FW QLCNIC_REG(0x274)
+
+#define INTR_SCHEME_PERPORT 0x1
+#define MSI_MODE_MULTIFUNC 0x1
+
+/* used for ethtool tests */
+#define CRB_SCRATCHPAD_TEST QLCNIC_REG(0x280)
+
+/*
+ * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
+ * which can be read by the Phantom host to get producer/consumer indexes from
+ * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following
+ * registers will be used for the addresses of the ring's shared memory
+ * on the Phantom.
+ */
+
+#define qlcnic_get_temp_val(x) ((x) >> 16)
+#define qlcnic_get_temp_state(x) ((x) & 0xffff)
+#define qlcnic_encode_temp(val, state) (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+ QLCNIC_TEMP_NORMAL = 0x1, /* Normal operating range */
+ QLCNIC_TEMP_WARN, /* Sound alert, temperature getting high */
+ QLCNIC_TEMP_PANIC /* Fatal error, hardware has shut down. */
+};
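Worked example of the packing above: the CRB_TEMP_STATE word keeps the reading in its upper half and the state code in its lower half, so 0x002d0001 (a value invented for illustration) decodes to 45 degrees C in the normal state:

#include <stdio.h>

#define TEMP_VAL(x)	((x) >> 16)
#define TEMP_STATE(x)	((x) & 0xffff)

int main(void)
{
	unsigned int temp_word = 0x002d0001;

	printf("temp=%u C, state=%u\n", TEMP_VAL(temp_word), TEMP_STATE(temp_word));
	return 0;
}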
+
+/* Lock IDs for PHY lock */
+#define PHY_LOCK_DRIVER 0x44524956
+
+/* Used for PS PCI Memory access */
+#define PCIX_PS_OP_ADDR_LO (0x10000)
+/* via CRB (PS side only) */
+#define PCIX_PS_OP_ADDR_HI (0x10004)
+
+#define PCIX_INT_VECTOR (0x10100)
+#define PCIX_INT_MASK (0x10104)
+
+#define PCIX_OCM_WINDOW (0x10800)
+#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func))
+
+#define PCIX_TARGET_STATUS (0x10118)
+#define PCIX_TARGET_STATUS_F1 (0x10160)
+#define PCIX_TARGET_STATUS_F2 (0x10164)
+#define PCIX_TARGET_STATUS_F3 (0x10168)
+#define PCIX_TARGET_STATUS_F4 (0x10360)
+#define PCIX_TARGET_STATUS_F5 (0x10364)
+#define PCIX_TARGET_STATUS_F6 (0x10368)
+#define PCIX_TARGET_STATUS_F7 (0x1036c)
+
+#define PCIX_TARGET_MASK (0x10128)
+#define PCIX_TARGET_MASK_F1 (0x10170)
+#define PCIX_TARGET_MASK_F2 (0x10174)
+#define PCIX_TARGET_MASK_F3 (0x10178)
+#define PCIX_TARGET_MASK_F4 (0x10370)
+#define PCIX_TARGET_MASK_F5 (0x10374)
+#define PCIX_TARGET_MASK_F6 (0x10378)
+#define PCIX_TARGET_MASK_F7 (0x1037c)
+
+#define PCIX_MSI_F(i) (0x13000+((i)*4))
+
+#define QLCNIC_PCIX_PH_REG(reg) (QLCNIC_CRB_PCIE + (reg))
+#define QLCNIC_PCIX_PS_REG(reg) (QLCNIC_CRB_PCIX_MD + (reg))
+#define QLCNIC_PCIE_REG(reg) (QLCNIC_CRB_PCIE + (reg))
+
+#define PCIE_SEM0_LOCK (0x1c000)
+#define PCIE_SEM0_UNLOCK (0x1c004)
+#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N))
+#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N))
+
+#define PCIE_SETUP_FUNCTION (0x12040)
+#define PCIE_SETUP_FUNCTION2 (0x12048)
+#define PCIE_MISCCFG_RC (0x1206c)
+#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
+#define PCIE_CHICKEN3 (0x120c8)
+
+#define ISR_INT_STATE_REG (QLCNIC_PCIX_PS_REG(PCIE_MISCCFG_RC))
+#define PCIE_MAX_MASTER_SPLIT (0x14048)
+
+#define QLCNIC_PORT_MODE_NONE 0
+#define QLCNIC_PORT_MODE_XG 1
+#define QLCNIC_PORT_MODE_GB 2
+#define QLCNIC_PORT_MODE_802_3_AP 3
+#define QLCNIC_PORT_MODE_AUTO_NEG 4
+#define QLCNIC_PORT_MODE_AUTO_NEG_1G 5
+#define QLCNIC_PORT_MODE_AUTO_NEG_XG 6
+#define QLCNIC_PORT_MODE_ADDR (QLCNIC_CAM_RAM(0x24))
+#define QLCNIC_WOL_PORT_MODE (QLCNIC_CAM_RAM(0x198))
+
+#define QLCNIC_WOL_CONFIG_NV (QLCNIC_CAM_RAM(0x184))
+#define QLCNIC_WOL_CONFIG (QLCNIC_CAM_RAM(0x188))
+
+#define QLCNIC_PEG_TUNE_MN_PRESENT 0x1
+#define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c))
+
+#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14))
+#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0))
+#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8))
+#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac))
+#define QLCNIC_CRB_DEV_REF_COUNT (QLCNIC_CAM_RAM(0x138))
+#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
+
+#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
+#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
+#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
+#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x14c))
+
+ /* Device State */
+#define QLCNIC_DEV_COLD 1
+#define QLCNIC_DEV_INITALIZING 2
+#define QLCNIC_DEV_READY 3
+#define QLCNIC_DEV_NEED_RESET 4
+#define QLCNIC_DEV_NEED_QUISCENT 5
+#define QLCNIC_DEV_FAILED 6
+
+#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
+#define QLCNIC_RCODE_DRIVER_CAN_RELOAD 0x40000000
+#define QLCNIC_RCODE_FATAL_ERROR 0x80000000
+#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
+#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
+
+#define FW_POLL_DELAY (2 * HZ)
+#define FW_FAIL_THRESH 3
+#define FW_POLL_THRESH 10
+
+#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define PCIX_INT_VECTOR_BIT_F0 0x0080
+#define PCIX_INT_VECTOR_BIT_F1 0x0100
+#define PCIX_INT_VECTOR_BIT_F2 0x0200
+#define PCIX_INT_VECTOR_BIT_F3 0x0400
+#define PCIX_INT_VECTOR_BIT_F4 0x0800
+#define PCIX_INT_VECTOR_BIT_F5 0x1000
+#define PCIX_INT_VECTOR_BIT_F6 0x2000
+#define PCIX_INT_VECTOR_BIT_F7 0x4000
+
+struct qlcnic_legacy_intr_set {
+ u32 int_vec_bit;
+ u32 tgt_status_reg;
+ u32 tgt_mask_reg;
+ u32 pci_int_reg;
+};
+
+#define QLCNIC_LEGACY_INTR_CONFIG \
+{ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
+}
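A sketch of how the initializer above would typically be consumed: one qlcnic_legacy_intr_set per PCI function, indexed by function number (this fragment assumes the header above and kernel types; the variable and function names are illustrative, not taken from the patch):

static struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;

/* pick the legacy interrupt status register for a given PCI function */
static u32 legacy_status_reg_for(unsigned int pci_func)
{
	return legacy_intr[pci_func].tgt_status_reg;
}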
+
+/* NIU REGS */
+
+#define _qlcnic_crb_get_bit(var, bit) ((var >> bit) & 0x1)
+
+/*
+ * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
+ *
+ * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
+ * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
+ * Bit 2 : enable_rx => 1:enable frame recv, 0:disable
+ * Bit 3 : rx_synced => R/O: recv enable synched to recv stream
+ * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
+ * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
+ * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
+ * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
+ * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
+ * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
+ * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
+ * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
+ */
+#define qlcnic_gb_rx_flowctl(config_word) \
+ ((config_word) |= 1 << 5)
+#define qlcnic_gb_get_rx_flowctl(config_word) \
+ _qlcnic_crb_get_bit((config_word), 5)
+#define qlcnic_gb_unset_rx_flowctl(config_word) \
+ ((config_word) &= ~(1 << 5))
+
+/*
+ * NIU GB Pause Ctl Register
+ */
+
+#define qlcnic_gb_set_gb0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define qlcnic_gb_set_gb1_mask(config_word) \
+ ((config_word) |= 1 << 2)
+#define qlcnic_gb_set_gb2_mask(config_word) \
+ ((config_word) |= 1 << 4)
+#define qlcnic_gb_set_gb3_mask(config_word) \
+ ((config_word) |= 1 << 6)
+
+#define qlcnic_gb_get_gb0_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 0)
+#define qlcnic_gb_get_gb1_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 2)
+#define qlcnic_gb_get_gb2_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 4)
+#define qlcnic_gb_get_gb3_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 6)
+
+#define qlcnic_gb_unset_gb0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define qlcnic_gb_unset_gb1_mask(config_word) \
+ ((config_word) &= ~(1 << 2))
+#define qlcnic_gb_unset_gb2_mask(config_word) \
+ ((config_word) &= ~(1 << 4))
+#define qlcnic_gb_unset_gb3_mask(config_word) \
+ ((config_word) &= ~(1 << 6))
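In this register a set mask bit disables tx pause frames for the corresponding port (GB0 bit 0, GB1 bit 2, GB2 bit 4, GB3 bit 6), which is why qlcnic_get_pauseparam() above reports tx_pause as the logical NOT of the mask bit. A standalone sketch with an invented register value:

#include <stdio.h>

#define GET_BIT(var, bit) (((var) >> (bit)) & 0x1)

int main(void)
{
	unsigned int pause_ctl = 0x05;		/* example: GB0 and GB1 masks set */
	int gb_mask_bit[4] = { 0, 2, 4, 6 };
	int port;

	for (port = 0; port < 4; port++)
		printf("GB%d tx_pause=%d\n", port,
		       !GET_BIT(pause_ctl, gb_mask_bit[port]));
	/* prints 0, 0, 1, 1 */
	return 0;
}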
+
+/*
+ * NIU XG Pause Ctl Register
+ *
+ * Bit 0 : xg0_mask => 1:disable tx pause frames
+ * Bit 1 : xg0_request => 1:request single pause frame
+ * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
+ * Bit 3 : xg1_mask => 1:disable tx pause frames
+ * Bit 4 : xg1_request => 1:request single pause frame
+ * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
+ */
+
+#define qlcnic_xg_set_xg0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define qlcnic_xg_set_xg1_mask(config_word) \
+ ((config_word) |= 1 << 3)
+
+#define qlcnic_xg_get_xg0_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 0)
+#define qlcnic_xg_get_xg1_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 3)
+
+#define qlcnic_xg_unset_xg0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define qlcnic_xg_unset_xg1_mask(config_word) \
+ ((config_word) &= ~(1 << 3))
+
+/*
+ * PHY-Specific MII control/status registers.
+ */
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG 4
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17
+
+/*
+ * PHY-Specific Status Register (reg 17).
+ *
+ * Bit 0 : jabber => 1:jabber detected, 0:not
+ * Bit 1 : polarity => 1:polarity reversed, 0:normal
+ * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled
+ * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled
+ * Bit 4 : energydetect => 1:sleep, 0:active
+ * Bit 5 : downshift => 1:downshift, 0:no downshift
+ * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
+ * Bits 7-9 : cablelen => not valid in 10Mb/s mode
+ * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
+ * Bit 10 : link => 1:link up, 0:link down
+ * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet
+ * Bit 12 : pagercvd => 1:page received, 0:page not received
+ * Bit 13 : duplex => 1:full duplex, 0:half duplex
+ * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
+ */
+
+#define qlcnic_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)
+
+#define qlcnic_set_phy_speed(config_word, val) \
+ ((config_word) |= ((val & 0x03) << 14))
+#define qlcnic_set_phy_duplex(config_word) \
+ ((config_word) |= 1 << 13)
+#define qlcnic_clear_phy_duplex(config_word) \
+ ((config_word) &= ~(1 << 13))
+
+#define qlcnic_get_phy_link(config_word) \
+ _qlcnic_crb_get_bit(config_word, 10)
+#define qlcnic_get_phy_duplex(config_word) \
+ _qlcnic_crb_get_bit(config_word, 13)
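A standalone sketch of decoding the PHY status word documented above; 0xa400 is an invented value with speed bits [15:14] = 2 (1000 Mb/s), the duplex bit 13 set and the link bit 10 set:

#include <stdio.h>

#define GET_BIT(var, bit)	(((var) >> (bit)) & 0x1)
#define PHY_SPEED(word)		(((word) >> 14) & 0x03)

int main(void)
{
	static const char *speed[] = { "10", "100", "1000", "rsvd" };
	unsigned int status = 0xa400;

	printf("link=%u duplex=%s speed=%s Mb/s\n",
	       GET_BIT(status, 10),
	       GET_BIT(status, 13) ? "full" : "half",
	       speed[PHY_SPEED(status)]);
	/* prints: link=1 duplex=full speed=1000 Mb/s */
	return 0;
}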
+
+#define QLCNIC_NIU_NON_PROMISC_MODE 0
+#define QLCNIC_NIU_PROMISC_MODE 1
+#define QLCNIC_NIU_ALLMULTI_MODE 2
+
+struct crb_128M_2M_sub_block_map {
+ unsigned valid;
+ unsigned start_128M;
+ unsigned end_128M;
+ unsigned start_2M;
+};
+
+struct crb_128M_2M_block_map{
+ struct crb_128M_2M_sub_block_map sub_block[16];
+};
+#endif /* __QLCNIC_HDR_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
new file mode 100644
index 000000000000..8ea7f869e293
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -0,0 +1,1275 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include "qlcnic.h"
+
+#include <net/ip.h>
+
+#define MASK(n) ((1ULL<<(n))-1)
+#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
+
+#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
+
+#define CRB_BLK(off) ((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
+#define CRB_WINDOW_2M (0x130060)
+#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
+#define CRB_INDIRECT_2M (0x1e0000UL)
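A standalone sketch of the block/sub-block split performed by CRB_BLK()/CRB_SUBBLK() on an offset taken relative to the start of CRB space; these indices select a row of the crb_128M_2M_map[] table defined below (0x0600000, the NIU block, is used as the example):

#include <stdio.h>

#define CRB_BLK(off)	(((off) >> 20) & 0x3f)
#define CRB_SUBBLK(off)	(((off) >> 16) & 0xf)

int main(void)
{
	unsigned long off = 0x0600000;	/* NIU block, relative to CRB space */

	printf("blk=%lu subblk=%lu\n", CRB_BLK(off), CRB_SUBBLK(off));
	/* prints: blk=6 subblk=0 */
	return 0;
}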
+
+
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+ return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel(((u32) (val)), (addr));
+ writel(((u32) (val >> 32)), (addr + 4));
+}
+#endif
+
+#define ADDR_IN_RANGE(addr, low, high) \
+ (((addr) < (high)) && ((addr) >= (low)))
+
+#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
+ ((adapter)->ahw.pci_base0 + (off))
+
+static void __iomem *pci_base_offset(struct qlcnic_adapter *adapter,
+ unsigned long off)
+{
+ if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
+ return PCI_OFFSET_FIRST_RANGE(adapter, off);
+
+ return NULL;
+}
+
+static const struct crb_128M_2M_block_map
+crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
+ {{{0, 0, 0, 0} } }, /* 0: PCI */
+ {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
+ {1, 0x0110000, 0x0120000, 0x130000},
+ {1, 0x0120000, 0x0122000, 0x124000},
+ {1, 0x0130000, 0x0132000, 0x126000},
+ {1, 0x0140000, 0x0142000, 0x128000},
+ {1, 0x0150000, 0x0152000, 0x12a000},
+ {1, 0x0160000, 0x0170000, 0x110000},
+ {1, 0x0170000, 0x0172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x01e0000, 0x01e0800, 0x122000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
+ {{{0, 0, 0, 0} } }, /* 3: */
+ {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
+ {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
+ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
+ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
+ {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x08f0000, 0x08f2000, 0x172000} } },
+ {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x09f0000, 0x09f2000, 0x176000} } },
+ {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0af0000, 0x0af2000, 0x17a000} } },
+ {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+ {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
+ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
+ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
+ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
+ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
+ {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
+ {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
+ {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
+ {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
+ {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
+ {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
+ {{{0, 0, 0, 0} } }, /* 23: */
+ {{{0, 0, 0, 0} } }, /* 24: */
+ {{{0, 0, 0, 0} } }, /* 25: */
+ {{{0, 0, 0, 0} } }, /* 26: */
+ {{{0, 0, 0, 0} } }, /* 27: */
+ {{{0, 0, 0, 0} } }, /* 28: */
+ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
+ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
+ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
+ {{{0} } }, /* 32: PCI */
+ {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
+ {1, 0x2110000, 0x2120000, 0x130000},
+ {1, 0x2120000, 0x2122000, 0x124000},
+ {1, 0x2130000, 0x2132000, 0x126000},
+ {1, 0x2140000, 0x2142000, 0x128000},
+ {1, 0x2150000, 0x2152000, 0x12a000},
+ {1, 0x2160000, 0x2170000, 0x110000},
+ {1, 0x2170000, 0x2172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
+ {{{0} } }, /* 35: */
+ {{{0} } }, /* 36: */
+ {{{0} } }, /* 37: */
+ {{{0} } }, /* 38: */
+ {{{0} } }, /* 39: */
+ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
+ {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
+ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
+ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
+ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
+ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
+ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
+ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
+ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
+ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
+ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
+ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
+ {{{0} } }, /* 52: */
+ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
+ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
+ {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
+ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
+ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
+ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
+ {{{0} } }, /* 59: I2C0 */
+ {{{0} } }, /* 60: I2C1 */
+ {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
+ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
+ {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
+};
+
+/*
+ * top 12 bits of crb internal address (hub, agent)
+ */
+static const unsigned crb_hub_agt[64] = {
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
+ 0,
+};
+
+/* PCI Windowing for DDR regions. */
+
+#define QLCNIC_PCIE_SEM_TIMEOUT 10000
+
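+/*
+ * Acquire a hardware semaphore by polling its PCIE lock register.
+ * Each failed attempt sleeps 1 ms, so QLCNIC_PCIE_SEM_TIMEOUT bounds the
+ * wait at roughly 10 seconds before giving up with -EIO.  When id_reg is
+ * supplied, the owning port number is recorded in it.
+ */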
+int
+qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
+{
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
+ if (done == 1)
+ break;
+ if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT)
+ return -EIO;
+ msleep(1);
+ }
+
+ if (id_reg)
+ QLCWR32(adapter, id_reg, adapter->portnum);
+
+ return 0;
+}
+
+void
+qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
+{
+ QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
+}
+
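+/*
+ * Copy nr_desc control descriptors into the tx ring and advance the
+ * producer index.  Fails with -EBUSY if the ring cannot hold all of
+ * them, and with -EIO if the adapter context is not up.
+ */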
+static int
+qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
+ struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
+{
+ u32 i, producer, consumer;
+ struct qlcnic_cmd_buffer *pbuf;
+ struct cmd_desc_type0 *cmd_desc;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ i = 0;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EIO;
+
+ tx_ring = adapter->tx_ring;
+ __netif_tx_lock_bh(tx_ring->txq);
+
+ producer = tx_ring->producer;
+ consumer = tx_ring->sw_consumer;
+
+ if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
+ netif_tx_stop_queue(tx_ring->txq);
+ __netif_tx_unlock_bh(tx_ring->txq);
+ return -EBUSY;
+ }
+
+ do {
+ cmd_desc = &cmd_desc_arr[i];
+
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+ pbuf->skb = NULL;
+ pbuf->frag_count = 0;
+
+ memcpy(&tx_ring->desc_head[producer],
+ &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ i++;
+
+ } while (i != nr_desc);
+
+ tx_ring->producer = producer;
+
+ qlcnic_update_cmd_producer(adapter, tx_ring);
+
+ __netif_tx_unlock_bh(tx_ring->txq);
+
+ return 0;
+}
+
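+/*
+ * Ask the firmware to add or delete a MAC address filter; 'op' is
+ * QLCNIC_MAC_ADD or QLCNIC_MAC_DEL as passed by the callers below.
+ */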
+static int
+qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
+ unsigned op)
+{
+ struct qlcnic_nic_req req;
+ struct qlcnic_mac_req *mac_req;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
+
+ word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ mac_req = (struct qlcnic_mac_req *)&req.words[0];
+ mac_req->op = op;
+ memcpy(mac_req->mac_addr, addr, 6);
+
+ return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+}
+
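+/*
+ * Keep a MAC filter on adapter->mac_list.  If the address is already on
+ * the temporary del_list it is simply moved back, so no firmware request
+ * is needed; otherwise a new entry is allocated and programmed.
+ */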
+static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter,
+ u8 *addr, struct list_head *del_list)
+{
+ struct list_head *head;
+ struct qlcnic_mac_list_s *cur;
+
+ /* check whether the address already exists in the list */
+ list_for_each(head, del_list) {
+ cur = list_entry(head, struct qlcnic_mac_list_s, list);
+
+ if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+ list_move_tail(head, &adapter->mac_list);
+ return 0;
+ }
+ }
+
+ cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
+ if (cur == NULL) {
+ dev_err(&adapter->netdev->dev,
+ "failed to add mac address filter\n");
+ return -ENOMEM;
+ }
+ memcpy(cur->mac_addr, addr, ETH_ALEN);
+ list_add_tail(&cur->list, &adapter->mac_list);
+
+ return qlcnic_sre_macaddr_change(adapter,
+ cur->mac_addr, QLCNIC_MAC_ADD);
+}
+
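+/*
+ * Rebuild the receive filter state: current filters are moved onto a
+ * temporary del_list, the ones still wanted (own MAC, broadcast and
+ * multicast entries) are moved back, the miss mode is programmed, and
+ * whatever remains on del_list is deleted from the firmware.
+ */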
+void qlcnic_set_multi(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct dev_mc_list *mc_ptr;
+ u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ u32 mode = VPORT_MISS_MODE_DROP;
+ LIST_HEAD(del_list);
+ struct list_head *head;
+ struct qlcnic_mac_list_s *cur;
+
+ list_splice_tail_init(&adapter->mac_list, &del_list);
+
+ qlcnic_nic_add_mac(adapter, adapter->mac_addr, &del_list);
+ qlcnic_nic_add_mac(adapter, bcast_addr, &del_list);
+
+ if (netdev->flags & IFF_PROMISC) {
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ goto send_fw_cmd;
+ }
+
+ if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > adapter->max_mc_count)) {
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ goto send_fw_cmd;
+ }
+
+ if (!netdev_mc_empty(netdev)) {
+ for (mc_ptr = netdev->mc_list; mc_ptr;
+ mc_ptr = mc_ptr->next) {
+ qlcnic_nic_add_mac(adapter, mc_ptr->dmi_addr,
+ &del_list);
+ }
+ }
+
+send_fw_cmd:
+ qlcnic_nic_set_promisc(adapter, mode);
+ head = &del_list;
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+
+ qlcnic_sre_macaddr_change(adapter,
+ cur->mac_addr, QLCNIC_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(mode);
+
+ return qlcnic_send_cmd_descs(adapter,
+ (struct cmd_desc_type0 *)&req, 1);
+}
+
+void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mac_list_s *cur;
+ struct list_head *head = &adapter->mac_list;
+
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+ qlcnic_sre_macaddr_change(adapter,
+ cur->mac_addr, QLCNIC_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+#define QLCNIC_CONFIG_INTR_COALESCE 3
+
+/*
+ * Send the interrupt coalescing parameter set by ethtool to the card.
+ */
+int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_req req;
+ u64 word[6];
+ int rv, i;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word[0] = QLCNIC_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word[0]);
+
+ memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
+ for (i = 0; i < 6; i++)
+ req.words[i] = cpu_to_le64(word[i]);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send interrupt coalescing parameters\n");
+
+ return rv;
+}
+
+int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ if (!!(adapter->flags & QLCNIC_LRO_ENABLED) == enable)
+ return 0;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(enable);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send configure hw lro request\n");
+
+ adapter->flags ^= QLCNIC_LRO_ENABLED;
+
+ return rv;
+}
+
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
+ return 0;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(enable);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send configure bridge mode request\n");
+
+ adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
+
+ return rv;
+}
+
+
+#define RSS_HASHTYPE_IP_TCP 0x3
+
+int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int i, rv;
+
+ const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL };
+
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ /*
+ * RSS request:
+ * bits 3-0: hash_method
+ * 5-4: hash_type_ipv4
+ * 7-6: hash_type_ipv6
+ * 8: enable
+ * 9: use indirection table
+ * 47-10: reserved
+ * 63-48: indirection table mask
+ */
+ word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ ((u64)(enable & 0x1) << 8) |
+ ((0x7ULL) << 48);
+ req.words[0] = cpu_to_le64(word);
+ for (i = 0; i < 5; i++)
+ req.words[i+1] = cpu_to_le64(key[i]);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev, "could not configure RSS\n");
+
+ return rv;
+}
+
+int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(cmd);
+ req.words[1] = cpu_to_le64(ip);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "could not notify %s IP 0x%x reuqest\n",
+ (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
+
+ return rv;
+}
+
+int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+ req.words[0] = cpu_to_le64(enable | (enable << 8));
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "could not configure link notification\n");
+
+ return rv;
+}
+
+int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
+ ((u64)adapter->portnum << 16) |
+ ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56) ;
+
+ req.req_hdr = cpu_to_le64(word);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "could not cleanup lro flows\n");
+
+ return rv;
+}
+
+/*
+ * qlcnic_change_mtu - Change the Maximum Transmission Unit (MTU)
+ * @returns 0 on success, negative on failure
+ */
+
+int qlcnic_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int rc = 0;
+
+ if (mtu > P3_MAX_MTU) {
+ dev_err(&adapter->netdev->dev, "mtu > %d bytes unsupported\n",
+ P3_MAX_MTU);
+ return -EINVAL;
+ }
+
+ rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
+
+ if (!rc)
+ netdev->mtu = mtu;
+
+ return rc;
+}
+
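+/*
+ * Read this function's MAC address from the CRB MAC block.  Two PCI
+ * functions share each three-dword entry, so odd-numbered functions
+ * take the upper six bytes of the pair.
+ */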
+int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac)
+{
+ u32 crbaddr, mac_hi, mac_lo;
+ int pci_func = adapter->ahw.pci_func;
+
+ crbaddr = CRB_MAC_BLOCK_START +
+ (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
+
+ mac_lo = QLCRD32(adapter, crbaddr);
+ mac_hi = QLCRD32(adapter, crbaddr+4);
+
+ if (pci_func & 1)
+ *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
+ else
+ *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));
+
+ return 0;
+}
+
+/*
+ * Translate a CRB offset in the 128M pci map to an ioremapped address.
+ *
+ * In: 'off' is the offset from base in the 128M pci map.
+ * Returns < 0 if 'off' is not a valid CRB offset,
+ *  0 if the register is directly mapped; '*addr' is set to its address
+ *    in the 2M map and no window change is needed,
+ *  1 if access through the CRB window is needed; '*addr' is set to the
+ *    window location for the low 16 bits of 'off'.
+ */
+static int
+qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
+ ulong off, void __iomem **addr)
+{
+ const struct crb_128M_2M_sub_block_map *m;
+
+ if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
+ return -EINVAL;
+
+ off -= QLCNIC_PCI_CRBSPACE;
+
+ /*
+ * Try direct map
+ */
+ m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
+
+ if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
+ *addr = adapter->ahw.pci_base0 + m->start_2M +
+ (off - m->start_128M);
+ return 0;
+ }
+
+ /*
+ * Not in direct map, use crb window
+ */
+ *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
+ return 1;
+}
+
+/*
+ * Change the CRB window to the one covering 'off' (an offset from base
+ * in the 128M pci map).  The window register is read back to make sure
+ * the write took effect; callers hold crb_win_lock around this.
+ */
+static void
+qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
+{
+ u32 window;
+ void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;
+
+ off -= QLCNIC_PCI_CRBSPACE;
+
+ window = CRB_HI(off);
+
+ if (adapter->ahw.crb_win == window)
+ return;
+
+ writel(window, addr);
+ if (readl(addr) != window) {
+ if (printk_ratelimit())
+ dev_warn(&adapter->pdev->dev,
+ "failed to set CRB window to %d off 0x%lx\n",
+ window, off);
+ }
+ adapter->ahw.crb_win = window;
+}
+
+int
+qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
+{
+ unsigned long flags;
+ int rv;
+ void __iomem *addr = NULL;
+
+ rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
+
+ if (rv == 0) {
+ writel(data, addr);
+ return 0;
+ }
+
+ if (rv > 0) {
+ /* indirect access */
+ write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ crb_win_lock(adapter);
+ qlcnic_pci_set_crbwindow_2M(adapter, off);
+ writel(data, addr);
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ return 0;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid offset: 0x%016lx\n", __func__, off);
+ dump_stack();
+ return -EIO;
+}
+
+u32
+qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
+{
+ unsigned long flags;
+ int rv;
+ u32 data;
+ void __iomem *addr = NULL;
+
+ rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
+
+ if (rv == 0)
+ return readl(addr);
+
+ if (rv > 0) {
+ /* indirect access */
+ write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ crb_win_lock(adapter);
+ qlcnic_pci_set_crbwindow_2M(adapter, off);
+ data = readl(addr);
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ return data;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid offset: 0x%016lx\n", __func__, off);
+ dump_stack();
+ return -1;
+}
+
+
+void __iomem *
+qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
+{
+ void __iomem *addr = NULL;
+
+ WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));
+
+ return addr;
+}
+
+
+static int
+qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
+ u64 addr, u32 *start)
+{
+ u32 window;
+ struct pci_dev *pdev = adapter->pdev;
+
+ if ((addr & 0x00ff800) == 0xff800) {
+ if (printk_ratelimit())
+ dev_warn(&pdev->dev, "QM access not handled\n");
+ return -EIO;
+ }
+
+ window = OCM_WIN_P3P(addr);
+
+ writel(window, adapter->ahw.ocm_win_crb);
+ /* read back to flush */
+ readl(adapter->ahw.ocm_win_crb);
+
+ adapter->ahw.ocm_win = window;
+ *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
+ return 0;
+}
+
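+/*
+ * Access one 64-bit word of on-chip memory through a direct mapping of
+ * the OCM window.  op == 0 reads into *data, anything else writes *data.
+ * If the window is not already mapped, a temporary page is ioremapped.
+ */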
+static int
+qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
+ u64 *data, int op)
+{
+ void __iomem *addr, *mem_ptr = NULL;
+ resource_size_t mem_base;
+ int ret;
+ u32 start;
+
+ mutex_lock(&adapter->ahw.mem_lock);
+
+ ret = qlcnic_pci_set_window_2M(adapter, off, &start);
+ if (ret != 0)
+ goto unlock;
+
+ addr = pci_base_offset(adapter, start);
+ if (addr)
+ goto noremap;
+
+ mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);
+
+ mem_ptr = ioremap(mem_base, PAGE_SIZE);
+ if (mem_ptr == NULL) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ addr = mem_ptr + (start & (PAGE_SIZE - 1));
+
+noremap:
+ if (op == 0) /* read */
+ *data = readq(addr);
+ else /* write */
+ writeq(*data, addr);
+
+unlock:
+ mutex_unlock(&adapter->ahw.mem_lock);
+
+ if (mem_ptr)
+ iounmap(mem_ptr);
+ return ret;
+}
+
+#define MAX_CTL_CHECK 1000
+
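+/*
+ * Write a 64-bit word of adapter memory (DDR/QDR ranges) through the MIU
+ * test agent.  P3P parts use a 16-byte agent stride, so the neighbouring
+ * 64-bit word is first read back and rewritten unchanged.
+ */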
+int
+qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
+ u64 off, u64 data)
+{
+ int i, j, ret;
+ u32 temp, off8;
+ u64 stride;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* P3 onward, test agent base for MIU and SIU is the same */
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX_P3)) {
+ mem_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
+ mem_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
+ return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);
+
+ return -EIO;
+
+correct:
+ stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
+
+ off8 = off & ~(stride-1);
+
+ mutex_lock(&adapter->ahw.mem_lock);
+
+ writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+
+ i = 0;
+ if (stride == 16) {
+ writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE),
+ (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ ret = -EIO;
+ goto done;
+ }
+
+ i = (off & 0xf) ? 0 : 2;
+ writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
+ mem_crb + MIU_TEST_AGT_WRDATA(i));
+ writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
+ mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+ i = (off & 0xf) ? 2 : 0;
+ }
+
+ writel(data & 0xffffffff,
+ mem_crb + MIU_TEST_AGT_WRDATA(i));
+ writel((data >> 32) & 0xffffffff,
+ mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+
+ writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
+ (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to write through agent\n");
+ ret = -EIO;
+ } else
+ ret = 0;
+
+done:
+ mutex_unlock(&adapter->ahw.mem_lock);
+
+ return ret;
+}
+
+int
+qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
+ u64 off, u64 *data)
+{
+ int j, ret;
+ u32 temp, off8;
+ u64 val, stride;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* P3 onward, test agent base for MIU and SIU is the same */
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX_P3)) {
+ mem_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
+ mem_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
+ return qlcnic_pci_mem_access_direct(adapter,
+ off, data, 0);
+ }
+
+ return -EIO;
+
+correct:
+ stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
+
+ off8 = off & ~(stride-1);
+
+ mutex_lock(&adapter->ahw.mem_lock);
+
+ writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+ writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to read through agent\n");
+ ret = -EIO;
+ } else {
+ off8 = MIU_TEST_AGT_RDDATA_LO;
+ if ((stride == 16) && (off & 0xf))
+ off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
+
+ temp = readl(mem_crb + off8 + 4);
+ val = (u64)temp << 32;
+ val |= readl(mem_crb + off8);
+ *data = val;
+ ret = 0;
+ }
+
+ mutex_unlock(&adapter->ahw.mem_lock);
+
+ return ret;
+}
+
+int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
+{
+ int offset, board_type, magic;
+ struct pci_dev *pdev = adapter->pdev;
+
+ offset = QLCNIC_FW_MAGIC_OFFSET;
+ if (qlcnic_rom_fast_read(adapter, offset, &magic))
+ return -EIO;
+
+ if (magic != QLCNIC_BDINFO_MAGIC) {
+ dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
+ magic);
+ return -EIO;
+ }
+
+ offset = QLCNIC_BRDTYPE_OFFSET;
+ if (qlcnic_rom_fast_read(adapter, offset, &board_type))
+ return -EIO;
+
+ adapter->ahw.board_type = board_type;
+
+ if (board_type == QLCNIC_BRDTYPE_P3_4_GB_MM) {
+ u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
+ if ((gpio & 0x8000) == 0)
+ board_type = QLCNIC_BRDTYPE_P3_10G_TP;
+ }
+
+ switch (board_type) {
+ case QLCNIC_BRDTYPE_P3_HMEZ:
+ case QLCNIC_BRDTYPE_P3_XG_LOM:
+ case QLCNIC_BRDTYPE_P3_10G_CX4:
+ case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
+ case QLCNIC_BRDTYPE_P3_IMEZ:
+ case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
+ case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
+ case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
+ case QLCNIC_BRDTYPE_P3_10G_XFP:
+ case QLCNIC_BRDTYPE_P3_10000_BASE_T:
+ adapter->ahw.port_type = QLCNIC_XGBE;
+ break;
+ case QLCNIC_BRDTYPE_P3_REF_QG:
+ case QLCNIC_BRDTYPE_P3_4_GB:
+ case QLCNIC_BRDTYPE_P3_4_GB_MM:
+ adapter->ahw.port_type = QLCNIC_GBE;
+ break;
+ case QLCNIC_BRDTYPE_P3_10G_TP:
+ adapter->ahw.port_type = (adapter->portnum < 2) ?
+ QLCNIC_XGBE : QLCNIC_GBE;
+ break;
+ default:
+ dev_err(&pdev->dev, "unknown board type %x\n", board_type);
+ adapter->ahw.port_type = QLCNIC_XGBE;
+ break;
+ }
+
+ return 0;
+}
+
+int
+qlcnic_wol_supported(struct qlcnic_adapter *adapter)
+{
+ u32 wol_cfg;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ if (wol_cfg & (1UL << adapter->portnum)) {
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ if (wol_cfg & (1 << adapter->portnum))
+ return 1;
+ }
+
+ return 0;
+}
+
+int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+{
+ struct qlcnic_nic_req req;
+ int rv;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64((u64)rate << 32);
+ req.words[1] = cpu_to_le64(state);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv)
+ dev_err(&adapter->pdev->dev, "LED configuration failed.\n");
+
+ return rv;
+}
+
+static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u32 flag)
+{
+ struct qlcnic_nic_req req;
+ int rv;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+ req.words[0] = cpu_to_le64(flag);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv)
+ dev_err(&adapter->pdev->dev,
+ "%sting loopback mode failed.\n",
+ flag ? "Set" : "Reset");
+ return rv;
+}
+
+int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_set_fw_loopback(adapter, 1))
+ return -EIO;
+
+ if (qlcnic_nic_set_promisc(adapter,
+ VPORT_MISS_MODE_ACCEPT_ALL)) {
+ qlcnic_set_fw_loopback(adapter, 0);
+ return -EIO;
+ }
+
+ msleep(1000);
+ return 0;
+}
+
+void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter)
+{
+ int mode = VPORT_MISS_MODE_DROP;
+ struct net_device *netdev = adapter->netdev;
+
+ qlcnic_set_fw_loopback(adapter, 0);
+
+ if (netdev->flags & IFF_PROMISC)
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ else if (netdev->flags & IFF_ALLMULTI)
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+
+ qlcnic_nic_set_promisc(adapter, mode);
+}
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
new file mode 100644
index 000000000000..ea00ab4d4feb
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -0,0 +1,1541 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include "qlcnic.h"
+
+struct crb_addr_pair {
+ u32 addr;
+ u32 data;
+};
+
+#define QLCNIC_MAX_CRB_XFORM 60
+static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];
+
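+/*
+ * crb_addr_transform() fills crb_addr_xform[]: for each CRB agent it
+ * records the hub/agent address (in the top 12 bits) indexed by the
+ * agent's PCI CRB window number, for use by qlcnic_decode_crb_addr().
+ */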
+#define crb_addr_transform(name) \
+ (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \
+ QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20)
+
+#define QLCNIC_ADDR_ERROR (0xffffffff)
+
+static void
+qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring);
+
+static void crb_addr_transform_setup(void)
+{
+ crb_addr_transform(XDMA);
+ crb_addr_transform(TIMR);
+ crb_addr_transform(SRE);
+ crb_addr_transform(SQN3);
+ crb_addr_transform(SQN2);
+ crb_addr_transform(SQN1);
+ crb_addr_transform(SQN0);
+ crb_addr_transform(SQS3);
+ crb_addr_transform(SQS2);
+ crb_addr_transform(SQS1);
+ crb_addr_transform(SQS0);
+ crb_addr_transform(RPMX7);
+ crb_addr_transform(RPMX6);
+ crb_addr_transform(RPMX5);
+ crb_addr_transform(RPMX4);
+ crb_addr_transform(RPMX3);
+ crb_addr_transform(RPMX2);
+ crb_addr_transform(RPMX1);
+ crb_addr_transform(RPMX0);
+ crb_addr_transform(ROMUSB);
+ crb_addr_transform(SN);
+ crb_addr_transform(QMN);
+ crb_addr_transform(QMS);
+ crb_addr_transform(PGNI);
+ crb_addr_transform(PGND);
+ crb_addr_transform(PGN3);
+ crb_addr_transform(PGN2);
+ crb_addr_transform(PGN1);
+ crb_addr_transform(PGN0);
+ crb_addr_transform(PGSI);
+ crb_addr_transform(PGSD);
+ crb_addr_transform(PGS3);
+ crb_addr_transform(PGS2);
+ crb_addr_transform(PGS1);
+ crb_addr_transform(PGS0);
+ crb_addr_transform(PS);
+ crb_addr_transform(PH);
+ crb_addr_transform(NIU);
+ crb_addr_transform(I2Q);
+ crb_addr_transform(EG);
+ crb_addr_transform(MN);
+ crb_addr_transform(MS);
+ crb_addr_transform(CAS2);
+ crb_addr_transform(CAS1);
+ crb_addr_transform(CAS0);
+ crb_addr_transform(CAM);
+ crb_addr_transform(C2C1);
+ crb_addr_transform(C2C0);
+ crb_addr_transform(SMB);
+ crb_addr_transform(OCM0);
+ crb_addr_transform(I2C0);
+}
+
+void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_rx_buffer *rx_buf;
+ int i, ring;
+
+ recv_ctx = &adapter->recv_ctx;
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ for (i = 0; i < rds_ring->num_desc; ++i) {
+ rx_buf = &(rds_ring->rx_buf_arr[i]);
+ if (rx_buf->state == QLCNIC_BUFFER_FREE)
+ continue;
+ pci_unmap_single(adapter->pdev,
+ rx_buf->dma,
+ rds_ring->dma_size,
+ PCI_DMA_FROMDEVICE);
+ if (rx_buf->skb != NULL)
+ dev_kfree_skb_any(rx_buf->skb);
+ }
+ }
+}
+
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_buffer *cmd_buf;
+ struct qlcnic_skb_frag *buffrag;
+ int i, j;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ cmd_buf = tx_ring->cmd_buf_arr;
+ for (i = 0; i < tx_ring->num_desc; i++) {
+ buffrag = cmd_buf->frag_array;
+ if (buffrag->dma) {
+ pci_unmap_single(adapter->pdev, buffrag->dma,
+ buffrag->length, PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+ for (j = 0; j < cmd_buf->frag_count; j++) {
+ buffrag++;
+ if (buffrag->dma) {
+ pci_unmap_page(adapter->pdev, buffrag->dma,
+ buffrag->length,
+ PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+ }
+ if (cmd_buf->skb) {
+ dev_kfree_skb_any(cmd_buf->skb);
+ cmd_buf->skb = NULL;
+ }
+ cmd_buf++;
+ }
+}
+
+void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ recv_ctx = &adapter->recv_ctx;
+
+ if (recv_ctx->rds_rings == NULL)
+ goto skip_rds;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ vfree(rds_ring->rx_buf_arr);
+ rds_ring->rx_buf_arr = NULL;
+ }
+ kfree(recv_ctx->rds_rings);
+
+skip_rds:
+ if (adapter->tx_ring == NULL)
+ return;
+
+ tx_ring = adapter->tx_ring;
+ vfree(tx_ring->cmd_buf_arr);
+ kfree(adapter->tx_ring);
+}
+
+int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_rx_buffer *rx_buf;
+ int ring, i, size;
+
+ struct qlcnic_cmd_buffer *cmd_buf_arr;
+ struct net_device *netdev = adapter->netdev;
+
+ size = sizeof(struct qlcnic_host_tx_ring);
+ tx_ring = kzalloc(size, GFP_KERNEL);
+ if (tx_ring == NULL) {
+ dev_err(&netdev->dev, "failed to allocate tx ring struct\n");
+ return -ENOMEM;
+ }
+ adapter->tx_ring = tx_ring;
+
+ tx_ring->num_desc = adapter->num_txd;
+ tx_ring->txq = netdev_get_tx_queue(netdev, 0);
+
+ cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
+ if (cmd_buf_arr == NULL) {
+ dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
+ return -ENOMEM;
+ }
+ memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
+ tx_ring->cmd_buf_arr = cmd_buf_arr;
+
+ recv_ctx = &adapter->recv_ctx;
+
+ size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
+ rds_ring = kzalloc(size, GFP_KERNEL);
+ if (rds_ring == NULL) {
+ dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
+ return -ENOMEM;
+ }
+ recv_ctx->rds_rings = rds_ring;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ switch (ring) {
+ case RCV_RING_NORMAL:
+ rds_ring->num_desc = adapter->num_rxd;
+ if (adapter->ahw.cut_through) {
+ rds_ring->dma_size =
+ QLCNIC_CT_DEFAULT_RX_BUF_LEN;
+ rds_ring->skb_size =
+ QLCNIC_CT_DEFAULT_RX_BUF_LEN;
+ } else {
+ rds_ring->dma_size =
+ QLCNIC_P3_RX_BUF_MAX_LEN;
+ rds_ring->skb_size =
+ rds_ring->dma_size + NET_IP_ALIGN;
+ }
+ break;
+
+ case RCV_RING_JUMBO:
+ rds_ring->num_desc = adapter->num_jumbo_rxd;
+ rds_ring->dma_size =
+ QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
+
+ rds_ring->skb_size =
+ rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+
+ case RCV_RING_LRO:
+ rds_ring->num_desc = adapter->num_lro_rxd;
+ rds_ring->dma_size = QLCNIC_RX_LRO_BUFFER_LENGTH;
+ rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+
+ }
+ rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *)
+ vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
+ if (rds_ring->rx_buf_arr == NULL) {
+ dev_err(&netdev->dev, "Failed to allocate "
+ "rx buffer ring %d\n", ring);
+ goto err_out;
+ }
+ memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
+ INIT_LIST_HEAD(&rds_ring->free_list);
+ /*
+ * Now go through all of them, set reference handles
+ * and put them in the queues.
+ */
+ rx_buf = rds_ring->rx_buf_arr;
+ for (i = 0; i < rds_ring->num_desc; i++) {
+ list_add_tail(&rx_buf->list,
+ &rds_ring->free_list);
+ rx_buf->ref_handle = i;
+ rx_buf->state = QLCNIC_BUFFER_FREE;
+ rx_buf++;
+ }
+ spin_lock_init(&rds_ring->lock);
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ sds_ring->irq = adapter->msix_entries[ring].vector;
+ sds_ring->adapter = adapter;
+ sds_ring->num_desc = adapter->num_rxd;
+
+ for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
+ INIT_LIST_HEAD(&sds_ring->free_list[i]);
+ }
+
+ return 0;
+
+err_out:
+ qlcnic_free_sw_resources(adapter);
+ return -ENOMEM;
+}
+
+/*
+ * Utility to translate from internal Phantom CRB address
+ * to external PCI CRB address.
+ */
+static u32 qlcnic_decode_crb_addr(u32 addr)
+{
+ int i;
+ u32 base_addr, offset, pci_base;
+
+ crb_addr_transform_setup();
+
+ pci_base = QLCNIC_ADDR_ERROR;
+ base_addr = addr & 0xfff00000;
+ offset = addr & 0x000fffff;
+
+ for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) {
+ if (crb_addr_xform[i] == base_addr) {
+ pci_base = i << 20;
+ break;
+ }
+ }
+ if (pci_base == QLCNIC_ADDR_ERROR)
+ return pci_base;
+ else
+ return pci_base + offset;
+}
+
+#define QLCNIC_MAX_ROM_WAIT_USEC 100
+
+static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
+{
+ long timeout = 0;
+ long done = 0;
+
+ cond_resched();
+
+ while (done == 0) {
+ done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
+ done &= 2;
+ if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout reached waiting for rom done");
+ return -EIO;
+ }
+ udelay(1);
+ }
+ return 0;
+}
+
+static int do_rom_fast_read(struct qlcnic_adapter *adapter,
+ int addr, int *valp)
+{
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+ if (qlcnic_wait_rom_done(adapter)) {
+ dev_err(&adapter->pdev->dev, "Error waiting for rom done\n");
+ return -EIO;
+ }
+ /* reset abyte_cnt and dummy_byte_cnt */
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0);
+ udelay(10);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+
+ *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA);
+ return 0;
+}
+
+static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size)
+{
+ int addridx;
+ int ret = 0;
+
+ for (addridx = addr; addridx < (addr + size); addridx += 4) {
+ int v;
+ ret = do_rom_fast_read(adapter, addridx, &v);
+ if (ret != 0)
+ break;
+ *(__le32 *)bytes = cpu_to_le32(v);
+ bytes += 4;
+ }
+
+ return ret;
+}
+
+int
+qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size)
+{
+ int ret;
+
+ ret = qlcnic_rom_lock(adapter);
+ if (ret < 0)
+ return ret;
+
+ ret = do_rom_fast_read_words(adapter, addr, bytes, size);
+
+ qlcnic_rom_unlock(adapter);
+ return ret;
+}
+
+int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp)
+{
+ int ret;
+
+ if (qlcnic_rom_lock(adapter) != 0)
+ return -EIO;
+
+ ret = do_rom_fast_read(adapter, addr, valp);
+ qlcnic_rom_unlock(adapter);
+ return ret;
+}
+
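+/*
+ * Reset the chip and replay the CRB init table stored in flash, skipping
+ * registers (clock setup, PCI function enables, reset control) that the
+ * host must not rewrite here.
+ */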
+int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
+{
+ int addr, val;
+ int i, n, init_delay;
+ struct crb_addr_pair *buf;
+ unsigned offset;
+ u32 off;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* resetall */
+ qlcnic_rom_lock(adapter);
+ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xffffffff);
+ qlcnic_rom_unlock(adapter);
+
+ if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
+ qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
+ dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n);
+ return -EIO;
+ }
+ offset = n & 0xffffU;
+ n = (n >> 16) & 0xffffU;
+
+ if (n >= 1024) {
+ dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n");
+ return -EIO;
+ }
+
+ buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
+ if (buf == NULL) {
+ dev_err(&pdev->dev, "Unable to calloc memory for rom read.\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < n; i++) {
+ if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
+ qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
+ kfree(buf);
+ return -EIO;
+ }
+
+ buf[i].addr = addr;
+ buf[i].data = val;
+ }
+
+ for (i = 0; i < n; i++) {
+
+ off = qlcnic_decode_crb_addr(buf[i].addr);
+ if (off == QLCNIC_ADDR_ERROR) {
+ dev_err(&pdev->dev, "CRB init value out of range %x\n",
+ buf[i].addr);
+ continue;
+ }
+ off += QLCNIC_PCI_CRBSPACE;
+
+ if (off & 1)
+ continue;
+
+ /* skipping cold reboot MAGIC */
+ if (off == QLCNIC_CAM_RAM(0x1fc))
+ continue;
+ if (off == (QLCNIC_CRB_I2C0 + 0x1c))
+ continue;
+ if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */
+ continue;
+ if (off == (ROMUSB_GLB + 0xa8))
+ continue;
+ if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
+ continue;
+ if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
+ continue;
+ if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
+ continue;
+ if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET)
+ continue;
+ /* skip the function enable register */
+ if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION))
+ continue;
+ if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2))
+ continue;
+ if ((off & 0x0ff00000) == QLCNIC_CRB_SMB)
+ continue;
+
+ init_delay = 1;
+ /* After writing this register, HW needs time for CRB */
+ /* to quiet down (else crb_window returns 0xffffffff) */
+ if (off == QLCNIC_ROMUSB_GLB_SW_RESET)
+ init_delay = 1000;
+
+ QLCWR32(adapter, off, buf[i].data);
+
+ msleep(init_delay);
+ }
+ kfree(buf);
+
+ /* p2dn replyCount */
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
+ /* disable_peg_cache 0 & 1 */
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);
+
+ /* peg_clr_all */
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
+ return 0;
+}
+
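+/*
+ * Check whether the MN block is reported present in the peg tune
+ * capability register; only flashed firmware 4.0.220 and later report
+ * it.  The result steers the product-table lookup for unified images.
+ */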
+static int
+qlcnic_has_mn(struct qlcnic_adapter *adapter)
+{
+ u32 capability, flashed_ver;
+ capability = 0;
+
+ qlcnic_rom_fast_read(adapter,
+ QLCNIC_FW_VERSION_OFFSET, (int *)&flashed_ver);
+ flashed_ver = QLCNIC_DECODE_VERSION(flashed_ver);
+
+ if (flashed_ver >= QLCNIC_VERSION_CODE(4, 0, 220)) {
+
+ capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
+ if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
+ return 1;
+ }
+ return 0;
+}
+
+static
+struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section)
+{
+ u32 i;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ __le32 entries = cpu_to_le32(directory->num_entries);
+
+ for (i = 0; i < entries; i++) {
+
+ __le32 offs = cpu_to_le32(directory->findex) +
+ (i * cpu_to_le32(directory->entry_size));
+ __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
+
+ if (tab_type == section)
+ return (struct uni_table_desc *) &unirom[offs];
+ }
+
+ return NULL;
+}
+
+static int
+qlcnic_set_product_offs(struct qlcnic_adapter *adapter)
+{
+ struct uni_table_desc *ptab_descr;
+ const u8 *unirom = adapter->fw->data;
+ u32 i;
+ __le32 entries;
+ int mn_present = qlcnic_has_mn(adapter);
+
+ ptab_descr = qlcnic_get_table_desc(unirom,
+ QLCNIC_UNI_DIR_SECT_PRODUCT_TBL);
+ if (ptab_descr == NULL)
+ return -1;
+
+ entries = cpu_to_le32(ptab_descr->num_entries);
+nomn:
+ for (i = 0; i < entries; i++) {
+
+ __le32 flags, file_chiprev, offs;
+ u8 chiprev = adapter->ahw.revision_id;
+ u32 flagbit;
+
+ offs = cpu_to_le32(ptab_descr->findex) +
+ (i * cpu_to_le32(ptab_descr->entry_size));
+ flags = cpu_to_le32(*((int *)&unirom[offs] +
+ QLCNIC_UNI_FLAGS_OFF));
+ file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
+ QLCNIC_UNI_CHIP_REV_OFF));
+
+ flagbit = mn_present ? 1 : 2;
+
+ if ((chiprev == file_chiprev) &&
+ ((1ULL << flagbit) & flags)) {
+ adapter->file_prd_off = offs;
+ return 0;
+ }
+ }
+ if (mn_present) {
+ mn_present = 0;
+ goto nomn;
+ }
+ return -1;
+}
+
+static
+struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter,
+ u32 section, u32 idx_offset)
+{
+ const u8 *unirom = adapter->fw->data;
+ int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ idx_offset));
+ struct uni_table_desc *tab_desc;
+ __le32 offs;
+
+ tab_desc = qlcnic_get_table_desc(unirom, section);
+
+ if (tab_desc == NULL)
+ return NULL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * idx);
+
+ return (struct uni_data_desc *)&unirom[offs];
+}
+
+static u8 *
+qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter)
+{
+ u32 offs = QLCNIC_BOOTLD_START;
+
+ if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
+ QLCNIC_UNI_DIR_SECT_BOOTLD,
+ QLCNIC_UNI_BOOTLD_IDX_OFF))->findex);
+
+ return (u8 *)&adapter->fw->data[offs];
+}
+
+static u8 *
+qlcnic_get_fw_offs(struct qlcnic_adapter *adapter)
+{
+ u32 offs = QLCNIC_IMAGE_START;
+
+ if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
+ QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex);
+
+ return (u8 *)&adapter->fw->data[offs];
+}
+
+static __le32
+qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ return cpu_to_le32((qlcnic_get_data_desc(adapter,
+ QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF))->size);
+ else
+ return cpu_to_le32(
+ *(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]);
+}
+
+static __le32
+qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
+{
+ struct uni_data_desc *fw_data_desc;
+ const struct firmware *fw = adapter->fw;
+ __le32 major, minor, sub;
+ const u8 *ver_str;
+ int i, ret;
+
+ if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
+ return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]);
+
+ fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF);
+ ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
+ cpu_to_le32(fw_data_desc->size) - 17;
+
+ for (i = 0; i < 12; i++) {
+ if (!strncmp(&ver_str[i], "REV=", 4)) {
+ ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
+ &major, &minor, &sub);
+ if (ret != 3)
+ return 0;
+ else
+ return major + (minor << 8) + (sub << 16);
+ }
+ }
+
+ return 0;
+}
+
+static __le32
+qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
+{
+ const struct firmware *fw = adapter->fw;
+ __le32 bios_ver, prd_off = adapter->file_prd_off;
+
+ if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
+ return cpu_to_le32(
+ *(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]);
+
+ bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
+ + QLCNIC_UNI_BIOS_VERSION_OFF));
+
+ return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
+}
+
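+/*
+ * Decide whether firmware must be (re)loaded: a pending reset request,
+ * a failed previous init, a stalled alive counter, or a file image newer
+ * than the running firmware all force a reset.
+ */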
+int
+qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
+{
+ u32 count, old_count;
+ u32 val, version, major, minor, build;
+ int i, timeout;
+
+ if (adapter->need_fw_reset)
+ return 1;
+
+ /* last attempt had failed */
+ if (QLCRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
+ return 1;
+
+ old_count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+
+ for (i = 0; i < 10; i++) {
+
+ timeout = msleep_interruptible(200);
+ if (timeout) {
+ QLCWR32(adapter, CRB_CMDPEG_STATE,
+ PHAN_INITIALIZE_FAILED);
+ return -EINTR;
+ }
+
+ count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ if (count != old_count)
+ break;
+ }
+
+ /* firmware is dead */
+ if (count == old_count)
+ return 1;
+
+ /* check if we have a newer or different firmware file */
+ if (adapter->fw) {
+
+ val = qlcnic_get_fw_version(adapter);
+
+ version = QLCNIC_DECODE_VERSION(val);
+
+ major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+ if (version > QLCNIC_VERSION_CODE(major, minor, build))
+ return 1;
+ }
+
+ return 0;
+}
+
+static const char *fw_name[] = {
+ QLCNIC_UNIFIED_ROMIMAGE_NAME,
+ QLCNIC_FLASH_ROMIMAGE_NAME,
+};
+
+int
+qlcnic_load_firmware(struct qlcnic_adapter *adapter)
+{
+ u64 *ptr64;
+ u32 i, flashaddr, size;
+ const struct firmware *fw = adapter->fw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ dev_info(&pdev->dev, "loading firmware from %s\n",
+ fw_name[adapter->fw_type]);
+
+ if (fw) {
+ __le64 data;
+
+ size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
+
+ ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter);
+ flashaddr = QLCNIC_BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+
+ size = (__force u32)qlcnic_get_fw_size(adapter) / 8;
+
+ ptr64 = (u64 *)qlcnic_get_fw_offs(adapter);
+ flashaddr = QLCNIC_IMAGE_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+ } else {
+ u64 data;
+ u32 hi, lo;
+
+ size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
+ flashaddr = QLCNIC_BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ if (qlcnic_rom_fast_read(adapter,
+ flashaddr, (int *)&lo) != 0)
+ return -EIO;
+ if (qlcnic_rom_fast_read(adapter,
+ flashaddr + 4, (int *)&hi) != 0)
+ return -EIO;
+
+ data = (((u64)hi << 32) | lo);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+ }
+ msleep(1);
+
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020);
+ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e);
+ return 0;
+}
+
+static int
+qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
+{
+ __le32 val;
+ u32 ver, min_ver, bios, min_size;
+ struct pci_dev *pdev = adapter->pdev;
+ const struct firmware *fw = adapter->fw;
+ u8 fw_type = adapter->fw_type;
+
+ if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
+ if (qlcnic_set_product_offs(adapter))
+ return -EINVAL;
+
+ min_size = QLCNIC_UNI_FW_MIN_SIZE;
+ } else {
+ val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
+ if ((__force u32)val != QLCNIC_BDINFO_MAGIC)
+ return -EINVAL;
+
+ min_size = QLCNIC_FW_MIN_SIZE;
+ }
+
+ if (fw->size < min_size)
+ return -EINVAL;
+
+ val = qlcnic_get_fw_version(adapter);
+
+ min_ver = QLCNIC_VERSION_CODE(4, 0, 216);
+
+ ver = QLCNIC_DECODE_VERSION(val);
+
+ if ((_major(ver) > _QLCNIC_LINUX_MAJOR) || (ver < min_ver)) {
+ dev_err(&pdev->dev,
+ "%s: firmware version %d.%d.%d unsupported\n",
+ fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
+ return -EINVAL;
+ }
+
+ val = qlcnic_get_bios_version(adapter);
+ qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios);
+ if ((__force u32)val != bios) {
+ dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
+ fw_name[fw_type]);
+ return -EINVAL;
+ }
+
+ /* check if flashed firmware is newer */
+ if (qlcnic_rom_fast_read(adapter,
+ QLCNIC_FW_VERSION_OFFSET, (int *)&val))
+ return -EIO;
+
+ val = QLCNIC_DECODE_VERSION(val);
+ if (val > ver) {
+ dev_info(&pdev->dev, "%s: firmware is older than flash\n",
+ fw_name[fw_type]);
+ return -EINVAL;
+ }
+
+ QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
+ return 0;
+}
+
+static void
+qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
+{
+ u8 fw_type;
+
+ switch (adapter->fw_type) {
+ case QLCNIC_UNKNOWN_ROMIMAGE:
+ fw_type = QLCNIC_UNIFIED_ROMIMAGE;
+ break;
+
+ case QLCNIC_UNIFIED_ROMIMAGE:
+ default:
+ fw_type = QLCNIC_FLASH_ROMIMAGE;
+ break;
+ }
+
+ adapter->fw_type = fw_type;
+}
+
+
+
+void qlcnic_request_firmware(struct qlcnic_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int rc;
+
+ adapter->fw_type = QLCNIC_UNKNOWN_ROMIMAGE;
+
+next:
+ qlcnic_get_next_fwtype(adapter);
+
+ if (adapter->fw_type == QLCNIC_FLASH_ROMIMAGE) {
+ adapter->fw = NULL;
+ } else {
+ rc = request_firmware(&adapter->fw,
+ fw_name[adapter->fw_type], &pdev->dev);
+ if (rc != 0)
+ goto next;
+
+ rc = qlcnic_validate_firmware(adapter);
+ if (rc != 0) {
+ release_firmware(adapter->fw);
+ msleep(1);
+ goto next;
+ }
+ }
+}
+
+
+void
+qlcnic_release_firmware(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fw)
+ release_firmware(adapter->fw);
+ adapter->fw = NULL;
+}
+
+int qlcnic_phantom_init(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int retries = 60;
+
+ do {
+ val = QLCRD32(adapter, CRB_CMDPEG_STATE);
+
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return 0;
+ case PHAN_INITIALIZE_FAILED:
+ goto out_err;
+ default:
+ break;
+ }
+
+ msleep(500);
+
+ } while (--retries);
+
+ QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+
+out_err:
+ dev_err(&adapter->pdev->dev, "firmware init failed\n");
+ return -EIO;
+}
+
+static int
+qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int retries = 2000;
+
+ do {
+ val = QLCRD32(adapter, CRB_RCVPEG_STATE);
+
+ if (val == PHAN_PEG_RCV_INITIALIZED)
+ return 0;
+
+ msleep(10);
+
+ } while (--retries);
+
+ if (!retries) {
+ dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
+ "complete, state: 0x%x.\n", val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ err = qlcnic_receive_peg_ready(adapter);
+ if (err)
+ return err;
+
+ QLCWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
+ QLCWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
+ QLCWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
+ QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+
+ return err;
+}
+
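+/*
+ * Decode an asynchronous link event from the firmware and cache the new
+ * link speed, duplex, autoneg and module type on the adapter.
+ */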
+static void
+qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
+ struct qlcnic_fw_msg *msg)
+{
+ u32 cable_OUI;
+ u16 cable_len;
+ u16 link_speed;
+ u8 link_status, module, duplex, autoneg;
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->has_link_events = 1;
+
+ cable_OUI = msg->body[1] & 0xffffffff;
+ cable_len = (msg->body[1] >> 32) & 0xffff;
+ link_speed = (msg->body[1] >> 48) & 0xffff;
+
+ link_status = msg->body[2] & 0xff;
+ duplex = (msg->body[2] >> 16) & 0xff;
+ autoneg = (msg->body[2] >> 24) & 0xff;
+
+ module = (msg->body[2] >> 8) & 0xff;
+ if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
+ dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, "
+ "length %d\n", cable_OUI, cable_len);
+ else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
+ dev_info(&netdev->dev, "unsupported cable length %d\n",
+ cable_len);
+
+ qlcnic_advert_link_change(adapter, link_status);
+
+ if (duplex == LINKEVENT_FULL_DUPLEX)
+ adapter->link_duplex = DUPLEX_FULL;
+ else
+ adapter->link_duplex = DUPLEX_HALF;
+
+ adapter->module_type = module;
+ adapter->link_autoneg = autoneg;
+ adapter->link_speed = link_speed;
+}
+
+static void
+qlcnic_handle_fw_message(int desc_cnt, int index,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_fw_msg msg;
+ struct status_desc *desc;
+ int i = 0, opcode;
+
+ while (desc_cnt > 0 && i < 8) {
+ desc = &sds_ring->desc_head[index];
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
+
+ index = get_next_index(index, sds_ring->num_desc);
+ desc_cnt--;
+ }
+
+ opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
+ switch (opcode) {
+ case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
+ qlcnic_handle_linkevent(sds_ring->adapter, &msg);
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring,
+ struct qlcnic_rx_buffer *buffer)
+{
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct pci_dev *pdev = adapter->pdev;
+
+ buffer->skb = dev_alloc_skb(rds_ring->skb_size);
+ if (!buffer->skb)
+ return -ENOMEM;
+
+ skb = buffer->skb;
+
+ if (!adapter->ahw.cut_through)
+ skb_reserve(skb, 2);
+
+ dma = pci_map_single(pdev, skb->data,
+ rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(pdev, dma)) {
+ dev_kfree_skb_any(skb);
+ buffer->skb = NULL;
+ return -ENOMEM;
+ }
+
+ buffer->skb = skb;
+ buffer->dma = dma;
+ buffer->state = QLCNIC_BUFFER_BUSY;
+
+ return 0;
+}
+
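+/*
+ * Detach the skb from a receive buffer: unmap its DMA buffer, set the
+ * checksum status, and mark the buffer free for re-posting.
+ */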
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum)
+{
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
+ PCI_DMA_FROMDEVICE);
+
+ skb = buffer->skb;
+ if (!skb)
+ goto no_skb;
+
+ if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
+ adapter->stats.csummed++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ skb->dev = adapter->netdev;
+
+ buffer->skb = NULL;
+no_skb:
+ buffer->state = QLCNIC_BUFFER_FREE;
+ return skb;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_process_rcv(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, pkt_offset;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ length = qlcnic_get_sts_totallength(sts_data0);
+ cksum = qlcnic_get_sts_status(sts_data0);
+ pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+ skb->truesize = skb->len + sizeof(struct sk_buff);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ napi_gro_receive(&sds_ring->napi, skb);
+
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
+#define QLC_TCP_HDR_SIZE 20
+#define QLC_TCP_TS_OPTION_SIZE 12
+#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
+
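+/*
+ * Handle an LRO (aggregated) completion: the firmware delivers one
+ * large frame, so the IP total length and checksum and the TCP
+ * PSH/sequence fields are rebuilt from the status descriptors before
+ * the skb is handed to the stack.
+ */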
+static struct qlcnic_rx_buffer *
+qlcnic_process_lro(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0, u64 sts_data1)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct iphdr *iph;
+ struct tcphdr *th;
+ bool push, timestamp;
+ int l2_hdr_offset, l4_hdr_offset;
+ int index;
+ u16 lro_length, length, data_offset;
+ u32 seq_number;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_lro_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
+ lro_length = qlcnic_get_lro_sts_length(sts_data0);
+ l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
+ l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
+ push = qlcnic_get_lro_sts_push_flag(sts_data0);
+ seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return buffer;
+
+ if (timestamp)
+ data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
+ else
+ data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
+
+ skb_put(skb, lro_length + data_offset);
+
+ skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
+
+ skb_pull(skb, l2_hdr_offset);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ iph = (struct iphdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ iph->tot_len = htons(length);
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ th->psh = push;
+ th->seq = htonl(seq_number);
+
+ length = skb->len;
+
+ netif_receive_skb(skb);
+
+ adapter->stats.lro_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
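+/*
+ * qlcnic_process_rcv_ring() services the status (SDS) ring: it walks
+ * descriptors owned by the host, dispatches them by opcode (receive,
+ * LRO, firmware response), hands the descriptors back to the firmware,
+ * replenishes the RDS rings and finally writes the new consumer index
+ * to the hardware.
+ */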
+int
+qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct list_head *cur;
+ struct status_desc *desc;
+ struct qlcnic_rx_buffer *rxbuf;
+ u64 sts_data0, sts_data1;
+
+ int count = 0;
+ int opcode, ring, desc_cnt;
+ u32 consumer = sds_ring->consumer;
+
+ while (count < max) {
+ desc = &sds_ring->desc_head[consumer];
+ sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+ if (!(sts_data0 & STATUS_OWNER_HOST))
+ break;
+
+ desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+ opcode = qlcnic_get_sts_opcode(sts_data0);
+
+ switch (opcode) {
+ case QLCNIC_RXPKT_DESC:
+ case QLCNIC_OLD_RXPKT_DESC:
+ case QLCNIC_SYN_OFFLOAD:
+ ring = qlcnic_get_sts_type(sts_data0);
+ rxbuf = qlcnic_process_rcv(adapter, sds_ring,
+ ring, sts_data0);
+ break;
+ case QLCNIC_LRO_DESC:
+ ring = qlcnic_get_lro_sts_type(sts_data0);
+ sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
+ rxbuf = qlcnic_process_lro(adapter, sds_ring,
+ ring, sts_data0, sts_data1);
+ break;
+ case QLCNIC_RESPONSE_DESC:
+ qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
+ /* fall through */
+ default:
+ goto skip;
+ }
+
+ WARN_ON(desc_cnt > 1);
+
+ if (rxbuf)
+ list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+
+skip:
+ for (; desc_cnt > 0; desc_cnt--) {
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] =
+ cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ }
+ count++;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ struct qlcnic_host_rds_ring *rds_ring =
+ &adapter->recv_ctx.rds_rings[ring];
+
+ if (!list_empty(&sds_ring->free_list[ring])) {
+ list_for_each(cur, &sds_ring->free_list[ring]) {
+ rxbuf = list_entry(cur,
+ struct qlcnic_rx_buffer, list);
+ qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
+ }
+ spin_lock(&rds_ring->lock);
+ list_splice_tail_init(&sds_ring->free_list[ring],
+ &rds_ring->free_list);
+ spin_unlock(&rds_ring->lock);
+ }
+
+ qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
+ }
+
+ if (count) {
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+ }
+
+ return count;
+}
+
+void
+qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
+ struct qlcnic_host_rds_ring *rds_ring)
+{
+ struct rcv_desc *pdesc;
+ struct qlcnic_rx_buffer *buffer;
+ int producer, count = 0;
+ struct list_head *head;
+
+ producer = rds_ring->producer;
+
+ spin_lock(&rds_ring->lock);
+ head = &rds_ring->free_list;
+ while (!list_empty(head)) {
+
+ buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+
+ count++;
+ list_del(&buffer->list);
+
+ /* make a rcv descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+ pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+ spin_unlock(&rds_ring->lock);
+
+ if (count) {
+ rds_ring->producer = producer;
+ writel((producer-1) & (rds_ring->num_desc-1),
+ rds_ring->crb_rcv_producer);
+ }
+}
+
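+/*
+ * Non-blocking variant of qlcnic_post_rx_buffers(): it only uses
+ * spin_trylock(), so the receive path can attempt a refill without
+ * waiting; if the ring lock is contended the refill is skipped and
+ * retried on the next pass.
+ */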
+static void
+qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring)
+{
+ struct rcv_desc *pdesc;
+ struct qlcnic_rx_buffer *buffer;
+ int producer, count = 0;
+ struct list_head *head;
+
+ producer = rds_ring->producer;
+ if (!spin_trylock(&rds_ring->lock))
+ return;
+
+ head = &rds_ring->free_list;
+ while (!list_empty(head)) {
+
+ buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+
+ count++;
+ list_del(&buffer->list);
+
+ /* make a rcv descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+
+ if (count) {
+ rds_ring->producer = producer;
+ writel((producer - 1) & (rds_ring->num_desc - 1),
+ rds_ring->crb_rcv_producer);
+ }
+ spin_unlock(&rds_ring->lock);
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0)
+{
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, pkt_offset;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ length = qlcnic_get_sts_totallength(sts_data0);
+ cksum = qlcnic_get_sts_status(sts_data0);
+ pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ skb_put(skb, rds_ring->skb_size);
+
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+ skb->truesize = skb->len + sizeof(struct sk_buff);
+
+ if (!qlcnic_check_loopback_buff(skb->data))
+ adapter->diag_cnt++;
+
+ dev_kfree_skb_any(skb);
+
+ return buffer;
+}
+
+void
+qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct status_desc *desc;
+ struct qlcnic_rx_buffer *rxbuf;
+ u64 sts_data0;
+
+ int opcode, ring, desc_cnt;
+ u32 consumer = sds_ring->consumer;
+
+ desc = &sds_ring->desc_head[consumer];
+ sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+ if (!(sts_data0 & STATUS_OWNER_HOST))
+ return;
+
+ desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+ opcode = qlcnic_get_sts_opcode(sts_data0);
+
+ ring = qlcnic_get_sts_type(sts_data0);
+ rxbuf = qlcnic_process_rcv_diag(adapter, sds_ring,
+ ring, sts_data0);
+
+ desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+}
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
new file mode 100644
index 000000000000..665e8e56b6a8
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -0,0 +1,2720 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+
+#include "qlcnic.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/sysfs.h>
+
+MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
+MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
+
+char qlcnic_driver_name[] = "qlcnic";
+static const char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v"
+ QLCNIC_LINUX_VERSIONID;
+
+static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
+
+/* Default to restricted 1G auto-neg mode */
+static int wol_port_mode = 5;
+
+static int use_msi = 1;
+module_param(use_msi, int, 0644);
+MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
+
+static int use_msi_x = 1;
+module_param(use_msi_x, int, 0644);
+MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
+
+static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
+module_param(auto_fw_reset, int, 0644);
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
+
+static int __devinit qlcnic_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+static void __devexit qlcnic_remove(struct pci_dev *pdev);
+static int qlcnic_open(struct net_device *netdev);
+static int qlcnic_close(struct net_device *netdev);
+static void qlcnic_tx_timeout(struct net_device *netdev);
+static void qlcnic_tx_timeout_task(struct work_struct *work);
+static void qlcnic_attach_work(struct work_struct *work);
+static void qlcnic_fwinit_work(struct work_struct *work);
+static void qlcnic_fw_poll_work(struct work_struct *work);
+static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
+ work_func_t func, int delay);
+static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
+static int qlcnic_poll(struct napi_struct *napi, int budget);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void qlcnic_poll_controller(struct net_device *netdev);
+#endif
+
+static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
+
+static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
+static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
+
+static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
+static irqreturn_t qlcnic_intr(int irq, void *data);
+static irqreturn_t qlcnic_msi_intr(int irq, void *data);
+static irqreturn_t qlcnic_msix_intr(int irq, void *data);
+
+static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
+static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
+
+/* PCI Device ID Table */
+#define ENTRY(device) \
+ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
+ .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
+
+#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
+
+static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
+
+void
+qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(tx_ring->producer, tx_ring->crb_cmd_producer);
+
+ if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
+ netif_stop_queue(adapter->netdev);
+ smp_mb();
+ }
+}
+
+static const u32 msi_tgt_status[8] = {
+ ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
+ ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
+ ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
+ ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
+};
+
+static const
+struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
+
+static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(0, sds_ring->crb_intr_mask);
+}
+
+static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ writel(0x1, sds_ring->crb_intr_mask);
+
+ if (!QLCNIC_IS_MSI_FAMILY(adapter))
+ writel(0xfbff, adapter->tgt_mask_reg);
+}
+
+static int
+qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
+{
+ int size = sizeof(struct qlcnic_host_sds_ring) * count;
+
+ recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
+
+ return (recv_ctx->sds_rings == NULL);
+}
+
+static void
+qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
+{
+ kfree(recv_ctx->sds_rings);
+
+ recv_ctx->sds_rings = NULL;
+}
+
+static int
+qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ return -ENOMEM;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_poll, QLCNIC_NETDEV_WEIGHT);
+ }
+
+ return 0;
+}
+
+static void
+qlcnic_napi_del(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_del(&sds_ring->napi);
+ }
+
+ qlcnic_free_sds_rings(&adapter->recv_ctx);
+}
+
+static void
+qlcnic_napi_enable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ napi_enable(&sds_ring->napi);
+ qlcnic_enable_int(sds_ring);
+ }
+}
+
+static void
+qlcnic_napi_disable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ qlcnic_disable_int(sds_ring);
+ napi_synchronize(&sds_ring->napi);
+ napi_disable(&sds_ring->napi);
+ }
+}
+
+static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
+{
+ memset(&adapter->stats, 0, sizeof(adapter->stats));
+}
+
+static int qlcnic_set_dma_mask(struct qlcnic_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ u64 mask, cmask;
+
+ adapter->pci_using_dac = 0;
+
+ mask = DMA_BIT_MASK(39);
+ cmask = mask;
+
+ if (pci_set_dma_mask(pdev, mask) == 0 &&
+ pci_set_consistent_dma_mask(pdev, cmask) == 0) {
+ adapter->pci_using_dac = 1;
+ return 0;
+ }
+
+ return -EIO;
+}
+
+/* Update addressable range if firmware supports it */
+static int
+qlcnic_update_dma_mask(struct qlcnic_adapter *adapter)
+{
+ int change, shift, err;
+ u64 mask, old_mask, old_cmask;
+ struct pci_dev *pdev = adapter->pdev;
+
+ change = 0;
+
+ shift = QLCRD32(adapter, CRB_DMA_SHIFT);
+ if (shift > 32)
+ return 0;
+
+ if (shift > 9)
+ change = 1;
+
+ if (change) {
+ old_mask = pdev->dma_mask;
+ old_cmask = pdev->dev.coherent_dma_mask;
+
+ mask = DMA_BIT_MASK(32+shift);
+
+ err = pci_set_dma_mask(pdev, mask);
+ if (err)
+ goto err_out;
+
+ err = pci_set_consistent_dma_mask(pdev, mask);
+ if (err)
+ goto err_out;
+ dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
+ }
+
+ return 0;
+
+err_out:
+ pci_set_dma_mask(pdev, old_mask);
+ pci_set_consistent_dma_mask(pdev, old_cmask);
+ return err;
+}
+
+static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
+{
+ u32 val, data;
+
+ val = adapter->ahw.board_type;
+ if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
+ (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
+ if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
+ data = QLCNIC_PORT_MODE_802_3_AP;
+ QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+ } else if (port_mode == QLCNIC_PORT_MODE_XG) {
+ data = QLCNIC_PORT_MODE_XG;
+ QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+ } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
+ data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
+ QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+ } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
+ data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
+ QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+ } else {
+ data = QLCNIC_PORT_MODE_AUTO_NEG;
+ QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+ }
+
+ if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
+ (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
+ (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
+ (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
+ wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
+ }
+ QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
+ }
+}
+
+static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
+{
+ u32 control;
+ int pos;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_dword(pdev, pos, &control);
+ if (enable)
+ control |= PCI_MSIX_FLAGS_ENABLE;
+ else
+ control = 0;
+ pci_write_config_dword(pdev, pos, control);
+ }
+}
+
+static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ adapter->msix_entries[i].entry = i;
+}
+
+static int
+qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
+{
+ int i;
+ unsigned char *p;
+ u64 mac_addr;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ if (qlcnic_get_mac_addr(adapter, &mac_addr) != 0)
+ return -EIO;
+
+ p = (unsigned char *)&mac_addr;
+ for (i = 0; i < 6; i++)
+ netdev->dev_addr[i] = *(p + 5 - i);
+
+ memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
+ memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
+
+ /* set station address */
+
+ if (!is_valid_ether_addr(netdev->perm_addr))
+ dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
+ netdev->dev_addr);
+
+ return 0;
+}
+
+static int qlcnic_set_mac(struct net_device *netdev, void *p)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ if (netif_running(netdev)) {
+ netif_device_detach(netdev);
+ qlcnic_napi_disable(adapter);
+ }
+
+ memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ qlcnic_set_multi(adapter->netdev);
+
+ if (netif_running(netdev)) {
+ netif_device_attach(netdev);
+ qlcnic_napi_enable(adapter);
+ }
+ return 0;
+}
+
+static const struct net_device_ops qlcnic_netdev_ops = {
+ .ndo_open = qlcnic_open,
+ .ndo_stop = qlcnic_close,
+ .ndo_start_xmit = qlcnic_xmit_frame,
+ .ndo_get_stats = qlcnic_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_multicast_list = qlcnic_set_multi,
+ .ndo_set_mac_address = qlcnic_set_mac,
+ .ndo_change_mtu = qlcnic_change_mtu,
+ .ndo_tx_timeout = qlcnic_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = qlcnic_poll_controller,
+#endif
+};
+
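+/*
+ * Interrupt setup order: try MSI-X first (multiple SDS rings when RSS
+ * is supported), fall back to MSI, and finally to legacy INTx using
+ * the per-function legacy interrupt registers.
+ */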
+static void
+qlcnic_setup_intr(struct qlcnic_adapter *adapter)
+{
+ const struct qlcnic_legacy_intr_set *legacy_intrp;
+ struct pci_dev *pdev = adapter->pdev;
+ int err, num_msix;
+
+ if (adapter->rss_supported) {
+ num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
+ MSIX_ENTRIES_PER_ADAPTER : 2;
+ } else
+ num_msix = 1;
+
+ adapter->max_sds_rings = 1;
+
+ adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
+
+ legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
+
+ adapter->int_vec_bit = legacy_intrp->int_vec_bit;
+ adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
+ legacy_intrp->tgt_status_reg);
+ adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
+ legacy_intrp->tgt_mask_reg);
+ adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
+
+ adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
+ ISR_INT_STATE_REG);
+
+ qlcnic_set_msix_bit(pdev, 0);
+
+ if (adapter->msix_supported) {
+
+ qlcnic_init_msix_entries(adapter, num_msix);
+ err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
+ if (err == 0) {
+ adapter->flags |= QLCNIC_MSIX_ENABLED;
+ qlcnic_set_msix_bit(pdev, 1);
+
+ if (adapter->rss_supported)
+ adapter->max_sds_rings = num_msix;
+
+ dev_info(&pdev->dev, "using msi-x interrupts\n");
+ return;
+ }
+
+ if (err > 0)
+ pci_disable_msix(pdev);
+
+ /* fall through for msi */
+ }
+
+ if (use_msi && !pci_enable_msi(pdev)) {
+ adapter->flags |= QLCNIC_MSI_ENABLED;
+ adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
+ msi_tgt_status[adapter->ahw.pci_func]);
+ dev_info(&pdev->dev, "using msi interrupts\n");
+ adapter->msix_entries[0].vector = pdev->irq;
+ return;
+ }
+
+ dev_info(&pdev->dev, "using legacy interrupts\n");
+ adapter->msix_entries[0].vector = pdev->irq;
+}
+
+static void
+qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
+{
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ pci_disable_msix(adapter->pdev);
+ if (adapter->flags & QLCNIC_MSI_ENABLED)
+ pci_disable_msi(adapter->pdev);
+}
+
+static void
+qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw.pci_base0 != NULL)
+ iounmap(adapter->ahw.pci_base0);
+}
+
+static int
+qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
+{
+ void __iomem *mem_ptr0 = NULL;
+ resource_size_t mem_base;
+ unsigned long mem_len, pci_len0 = 0;
+
+ struct pci_dev *pdev = adapter->pdev;
+ int pci_func = adapter->ahw.pci_func;
+
+ /*
+ * Set the CRB window to invalid. If any register in window 0 is
+ * accessed it should set the window to 0 and then reset it to 1.
+ */
+ adapter->ahw.crb_win = -1;
+ adapter->ahw.ocm_win = -1;
+
+ /* remap phys address */
+ mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
+ mem_len = pci_resource_len(pdev, 0);
+
+ if (mem_len == QLCNIC_PCI_2MB_SIZE) {
+
+ mem_ptr0 = pci_ioremap_bar(pdev, 0);
+ if (mem_ptr0 == NULL) {
+ dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+ return -EIO;
+ }
+ pci_len0 = mem_len;
+ } else {
+ return -EIO;
+ }
+
+ dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
+
+ adapter->ahw.pci_base0 = mem_ptr0;
+ adapter->ahw.pci_len0 = pci_len0;
+
+ adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
+
+ return 0;
+}
+
+static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int i, found = 0;
+
+ for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
+ if (qlcnic_boards[i].vendor == pdev->vendor &&
+ qlcnic_boards[i].device == pdev->device &&
+ qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
+ qlcnic_boards[i].sub_device == pdev->subsystem_device) {
+ strcpy(name, qlcnic_boards[i].short_name);
+ found = 1;
+ break;
+ }
+
+ }
+
+ if (!found)
+ strcpy(name, "Unknown");
+}
+
+static void
+qlcnic_check_options(struct qlcnic_adapter *adapter)
+{
+ u32 fw_major, fw_minor, fw_build;
+ char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
+ char serial_num[32];
+ int i, offset, val;
+ int *ptr32;
+ struct pci_dev *pdev = adapter->pdev;
+
+ adapter->driver_mismatch = 0;
+
+ ptr32 = (int *)&serial_num;
+ offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
+ for (i = 0; i < 8; i++) {
+ if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
+ dev_err(&pdev->dev, "error reading board info\n");
+ adapter->driver_mismatch = 1;
+ return;
+ }
+ ptr32[i] = cpu_to_le32(val);
+ offset += sizeof(u32);
+ }
+
+ fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+ adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+ if (adapter->portnum == 0) {
+ get_brd_name(adapter, brd_name);
+
+ pr_info("%s: %s Board Chip rev 0x%x\n",
+ module_name(THIS_MODULE),
+ brd_name, adapter->ahw.revision_id);
+ }
+
+ if (adapter->fw_version < QLCNIC_VERSION_CODE(3, 4, 216)) {
+ adapter->driver_mismatch = 1;
+ dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
+ fw_major, fw_minor, fw_build);
+ return;
+ }
+
+ i = QLCRD32(adapter, QLCNIC_SRE_MISC);
+ adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
+
+ dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
+ fw_major, fw_minor, fw_build,
+ adapter->ahw.cut_through ? "cut-through" : "legacy");
+
+ if (adapter->fw_version >= QLCNIC_VERSION_CODE(4, 0, 222))
+ adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
+
+ adapter->flags &= ~QLCNIC_LRO_ENABLED;
+
+ if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ } else if (adapter->ahw.port_type == QLCNIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ }
+
+ adapter->msix_supported = !!use_msi_x;
+ adapter->rss_supported = !!use_msi_x;
+
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+
+ adapter->num_lro_rxd = 0;
+ adapter->max_rds_rings = 2;
+}
+
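+/*
+ * Bring the firmware to the READY state. Only one PCI function loads
+ * the firmware (decided by qlcnic_can_start_firmware()); the others
+ * skip to the qlcnic_phantom_init() handshake and wait for it to
+ * complete.
+ */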
+static int
+qlcnic_start_firmware(struct qlcnic_adapter *adapter)
+{
+ int val, err, first_boot;
+
+ err = qlcnic_set_dma_mask(adapter);
+ if (err)
+ return err;
+
+ if (!qlcnic_can_start_firmware(adapter))
+ goto wait_init;
+
+ first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
+ if (first_boot == 0x55555555)
+ /* This is the first boot after power up */
+ QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
+
+ qlcnic_request_firmware(adapter);
+
+ err = qlcnic_need_fw_reset(adapter);
+ if (err < 0)
+ goto err_out;
+ if (err == 0)
+ goto wait_init;
+
+ if (first_boot != 0x55555555) {
+ QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
+ qlcnic_pinit_from_rom(adapter);
+ msleep(1);
+ }
+
+ QLCWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
+ QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
+ QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
+
+ qlcnic_set_port_mode(adapter);
+
+ err = qlcnic_load_firmware(adapter);
+ if (err)
+ goto err_out;
+
+ qlcnic_release_firmware(adapter);
+
+ val = (_QLCNIC_LINUX_MAJOR << 16)
+ | ((_QLCNIC_LINUX_MINOR << 8))
+ | (_QLCNIC_LINUX_SUBVERSION);
+ QLCWR32(adapter, CRB_DRIVER_VERSION, val);
+
+wait_init:
+ /* Handshake with the card before we register the devices. */
+ err = qlcnic_phantom_init(adapter);
+ if (err)
+ goto err_out;
+
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
+
+ qlcnic_update_dma_mask(adapter);
+
+ qlcnic_check_options(adapter);
+
+ adapter->need_fw_reset = 0;
+
+ /* fall through and release firmware */
+
+err_out:
+ qlcnic_release_firmware(adapter);
+ return err;
+}
+
+static int
+qlcnic_request_irq(struct qlcnic_adapter *adapter)
+{
+ irq_handler_t handler;
+ struct qlcnic_host_sds_ring *sds_ring;
+ int err, ring;
+
+ unsigned long flags = 0;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+ handler = qlcnic_tmp_intr;
+ if (!QLCNIC_IS_MSI_FAMILY(adapter))
+ flags |= IRQF_SHARED;
+
+ } else {
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ handler = qlcnic_msix_intr;
+ else if (adapter->flags & QLCNIC_MSI_ENABLED)
+ handler = qlcnic_msi_intr;
+ else {
+ flags |= IRQF_SHARED;
+ handler = qlcnic_intr;
+ }
+ }
+ adapter->irq = netdev->irq;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
+ err = request_irq(sds_ring->irq, handler,
+ flags, sds_ring->name, sds_ring);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+qlcnic_free_irq(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ free_irq(sds_ring->irq, sds_ring);
+ }
+}
+
+static void
+qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
+{
+ adapter->coal.flags = QLCNIC_INTR_DEFAULT;
+ adapter->coal.normal.data.rx_time_us =
+ QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
+ adapter->coal.normal.data.rx_packets =
+ QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ adapter->coal.normal.data.tx_time_us =
+ QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
+ adapter->coal.normal.data.tx_packets =
+ QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
+}
+
+static int
+__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EIO;
+
+ qlcnic_set_multi(netdev);
+ qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
+
+ adapter->ahw.linkup = 0;
+
+ if (adapter->max_sds_rings > 1)
+ qlcnic_config_rss(adapter, 1);
+
+ qlcnic_config_intr_coalesce(adapter);
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
+
+ qlcnic_napi_enable(adapter);
+
+ qlcnic_linkevent_request(adapter, 1);
+
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
+ return 0;
+}
+
+/* Used during resume and by the firmware recovery code. */
+
+static int
+qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int err = 0;
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ err = __qlcnic_up(adapter, netdev);
+ rtnl_unlock();
+
+ return err;
+}
+
+static void
+__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return;
+
+ smp_mb();
+ spin_lock(&adapter->tx_clean_lock);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ qlcnic_free_mac_list(adapter);
+
+ qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
+
+ qlcnic_napi_disable(adapter);
+
+ qlcnic_release_tx_buffers(adapter);
+ spin_unlock(&adapter->tx_clean_lock);
+}
+
+/* Used during suspend and by the firmware recovery code. */
+
+static void
+qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ rtnl_lock();
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+ rtnl_unlock();
+
+}
+
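+/*
+ * qlcnic_attach()/qlcnic_detach() allocate and free everything needed
+ * to pass traffic: software and hardware rings, posted receive
+ * buffers, interrupts and the sysfs entries. adapter->is_up guards
+ * against re-entry.
+ */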
+static int
+qlcnic_attach(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err, ring;
+ struct qlcnic_host_rds_ring *rds_ring;
+
+ if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
+ return 0;
+
+ err = qlcnic_init_firmware(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_napi_add(adapter, netdev);
+ if (err)
+ return err;
+
+ err = qlcnic_alloc_sw_resources(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error in setting sw resources\n");
+ return err;
+ }
+
+ err = qlcnic_alloc_hw_resources(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error in setting hw resources\n");
+ goto err_out_free_sw;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx.rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, ring, rds_ring);
+ }
+
+ err = qlcnic_request_irq(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to setup interrupt\n");
+ goto err_out_free_rxbuf;
+ }
+
+ qlcnic_init_coalesce_defaults(adapter);
+
+ qlcnic_create_sysfs_entries(adapter);
+
+ adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
+ return 0;
+
+err_out_free_rxbuf:
+ qlcnic_release_rx_buffers(adapter);
+ qlcnic_free_hw_resources(adapter);
+err_out_free_sw:
+ qlcnic_free_sw_resources(adapter);
+ return err;
+}
+
+static void
+qlcnic_detach(struct qlcnic_adapter *adapter)
+{
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ qlcnic_remove_sysfs_entries(adapter);
+
+ qlcnic_free_hw_resources(adapter);
+ qlcnic_release_rx_buffers(adapter);
+ qlcnic_free_irq(adapter);
+ qlcnic_napi_del(adapter);
+ qlcnic_free_sw_resources(adapter);
+
+ adapter->is_up = 0;
+}
+
+void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ int ring;
+
+ if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx.sds_rings[ring];
+ qlcnic_disable_int(sds_ring);
+ }
+ }
+
+ qlcnic_detach(adapter);
+
+ adapter->diag_test = 0;
+ adapter->max_sds_rings = max_sds_rings;
+
+ if (qlcnic_attach(adapter))
+ return;
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+ netif_device_attach(netdev);
+}
+
+int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ int ring;
+ int ret;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ adapter->max_sds_rings = 1;
+ adapter->diag_test = test;
+
+ ret = qlcnic_attach(adapter);
+ if (ret)
+ return ret;
+
+ if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx.sds_rings[ring];
+ qlcnic_enable_int(sds_ring);
+ }
+ }
+
+ return 0;
+}
+
+int
+qlcnic_reset_context(struct qlcnic_adapter *adapter)
+{
+ int err = 0;
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (!err)
+ err = __qlcnic_up(adapter, netdev);
+
+ if (err)
+ goto done;
+ }
+
+ netif_device_attach(netdev);
+ }
+
+done:
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return err;
+}
+
+static int
+qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int err;
+ struct pci_dev *pdev = adapter->pdev;
+
+ adapter->rx_csum = 1;
+ adapter->mc_enabled = 0;
+ adapter->max_mc_count = 38;
+
+ netdev->netdev_ops = &qlcnic_netdev_ops;
+ netdev->watchdog_timeo = 2*HZ;
+
+ qlcnic_change_mtu(netdev, netdev->mtu);
+
+ SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
+
+ netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+ netdev->features |= (NETIF_F_GRO);
+ netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+
+ netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
+ netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
+
+ if (adapter->pci_using_dac) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
+ netdev->features |= (NETIF_F_HW_VLAN_TX);
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ netdev->features |= NETIF_F_LRO;
+
+ netdev->irq = adapter->msix_entries[0].vector;
+
+ INIT_WORK(&adapter->tx_timeout_task, qlcnic_tx_timeout_task);
+
+ if (qlcnic_read_mac_addr(adapter))
+ dev_warn(&pdev->dev, "failed to read mac addr\n");
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register net device\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int __devinit
+qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev = NULL;
+ struct qlcnic_adapter *adapter = NULL;
+ int err;
+ int pci_func_id = PCI_FUNC(pdev->devfn);
+ uint8_t revision_id;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ err = -ENODEV;
+ goto err_out_disable_pdev;
+ }
+
+ err = pci_request_regions(pdev, qlcnic_driver_name);
+ if (err)
+ goto err_out_disable_pdev;
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
+ if (!netdev) {
+ dev_err(&pdev->dev, "failed to allocate net_device\n");
+ err = -ENOMEM;
+ goto err_out_free_res;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->ahw.pci_func = pci_func_id;
+
+ revision_id = pdev->revision;
+ adapter->ahw.revision_id = revision_id;
+
+ rwlock_init(&adapter->ahw.crb_lock);
+ mutex_init(&adapter->ahw.mem_lock);
+
+ spin_lock_init(&adapter->tx_clean_lock);
+ INIT_LIST_HEAD(&adapter->mac_list);
+
+ err = qlcnic_setup_pci_map(adapter);
+ if (err)
+ goto err_out_free_netdev;
+
+ /* This will be reset for mezz cards */
+ adapter->portnum = pci_func_id;
+
+ err = qlcnic_get_board_info(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error getting board config info.\n");
+ goto err_out_iounmap;
+ }
+
+ err = qlcnic_start_firmware(adapter);
+ if (err)
+ goto err_out_decr_ref;
+
+ /*
+ * See if the firmware gave us a virtual-physical port mapping.
+ */
+ adapter->physical_port = adapter->portnum;
+
+ qlcnic_clear_stats(adapter);
+
+ qlcnic_setup_intr(adapter);
+
+ err = qlcnic_setup_netdev(adapter, netdev);
+ if (err)
+ goto err_out_disable_msi;
+
+ pci_set_drvdata(pdev, adapter);
+
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+
+ switch (adapter->ahw.port_type) {
+ case QLCNIC_GBE:
+ dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ case QLCNIC_XGBE:
+ dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ }
+
+ qlcnic_create_diag_entries(adapter);
+
+ return 0;
+
+err_out_disable_msi:
+ qlcnic_teardown_intr(adapter);
+
+err_out_decr_ref:
+ qlcnic_clr_all_drv_state(adapter);
+
+err_out_iounmap:
+ qlcnic_cleanup_pci_map(adapter);
+
+err_out_free_netdev:
+ free_netdev(netdev);
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_disable_pdev:
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void __devexit qlcnic_remove(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *netdev;
+
+ adapter = pci_get_drvdata(pdev);
+ if (adapter == NULL)
+ return;
+
+ netdev = adapter->netdev;
+
+ qlcnic_cancel_fw_work(adapter);
+
+ unregister_netdev(netdev);
+
+ cancel_work_sync(&adapter->tx_timeout_task);
+
+ qlcnic_detach(adapter);
+
+ qlcnic_clr_all_drv_state(adapter);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ qlcnic_teardown_intr(adapter);
+
+ qlcnic_remove_diag_entries(adapter);
+
+ qlcnic_cleanup_pci_map(adapter);
+
+ qlcnic_release_firmware(adapter);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ free_netdev(netdev);
+}
+static int __qlcnic_shutdown(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int retval;
+
+ netif_device_detach(netdev);
+
+ qlcnic_cancel_fw_work(adapter);
+
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ cancel_work_sync(&adapter->tx_timeout_task);
+
+ qlcnic_detach(adapter);
+
+ qlcnic_clr_all_drv_state(adapter);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ if (qlcnic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
+
+ return 0;
+}
+
+static void qlcnic_shutdown(struct pci_dev *pdev)
+{
+ if (__qlcnic_shutdown(pdev))
+ return;
+
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int
+qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int retval;
+
+ retval = __qlcnic_shutdown(pdev);
+ if (retval)
+ return retval;
+
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+
+static int
+qlcnic_resume(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ adapter->ahw.crb_win = -1;
+ adapter->ahw.ocm_win = -1;
+
+ err = qlcnic_start_firmware(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to start firmware\n");
+ return err;
+ }
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (err)
+ goto err_out;
+
+ err = qlcnic_up(adapter, netdev);
+ if (err)
+ goto err_out_detach;
+
+ qlcnic_config_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+ return 0;
+
+err_out_detach:
+ qlcnic_detach(adapter);
+err_out:
+ qlcnic_clr_all_drv_state(adapter);
+ return err;
+}
+#endif
+
+static int qlcnic_open(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (adapter->driver_mismatch)
+ return -EIO;
+
+ err = qlcnic_attach(adapter);
+ if (err)
+ return err;
+
+ err = __qlcnic_up(adapter, netdev);
+ if (err)
+ goto err_out;
+
+ netif_start_queue(netdev);
+
+ return 0;
+
+err_out:
+ qlcnic_detach(adapter);
+ return err;
+}
+
+/*
+ * qlcnic_close - Disables a network interface entry point
+ */
+static int qlcnic_close(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ __qlcnic_down(adapter, netdev);
+ return 0;
+}
+
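+/*
+ * qlcnic_tso_check() fills in the per-packet offload fields of the
+ * first Tx descriptor (VLAN tag, checksum/LSO opcode, header lengths)
+ * and, for LSO frames, copies the MAC/IP/TCP headers into the
+ * following descriptors as a header template for the firmware.
+ */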
+static void
+qlcnic_tso_check(struct net_device *netdev,
+ struct qlcnic_host_tx_ring *tx_ring,
+ struct cmd_desc_type0 *first_desc,
+ struct sk_buff *skb)
+{
+ u8 opcode = TX_ETHER_PKT;
+ __be16 protocol = skb->protocol;
+ u16 flags = 0, vid = 0;
+ u32 producer;
+ int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
+ struct cmd_desc_type0 *hwdesc;
+ struct vlan_ethhdr *vh;
+
+ if (protocol == cpu_to_be16(ETH_P_8021Q)) {
+
+ vh = (struct vlan_ethhdr *)skb->data;
+ protocol = vh->h_vlan_encapsulated_proto;
+ flags = FLAGS_VLAN_TAGGED;
+
+ } else if (vlan_tx_tag_present(skb)) {
+
+ flags = FLAGS_VLAN_OOB;
+ vid = vlan_tx_tag_get(skb);
+ qlcnic_set_tx_vlan_tci(first_desc, vid);
+ vlan_oob = 1;
+ }
+
+ if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
+ skb_shinfo(skb)->gso_size > 0) {
+
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ first_desc->total_hdr_length = hdr_len;
+ if (vlan_oob) {
+ first_desc->total_hdr_length += VLAN_HLEN;
+ first_desc->tcp_hdr_offset = VLAN_HLEN;
+ first_desc->ip_hdr_offset = VLAN_HLEN;
+ /* Only in case of TSO on vlan device */
+ flags |= FLAGS_VLAN_TAGGED;
+ }
+
+ opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
+ TX_TCP_LSO6 : TX_TCP_LSO;
+ tso = 1;
+
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 l4proto;
+
+ if (protocol == cpu_to_be16(ETH_P_IP)) {
+ l4proto = ip_hdr(skb)->protocol;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = TX_TCP_PKT;
+ else if (l4proto == IPPROTO_UDP)
+ opcode = TX_UDP_PKT;
+ } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
+ l4proto = ipv6_hdr(skb)->nexthdr;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = TX_TCPV6_PKT;
+ else if (l4proto == IPPROTO_UDP)
+ opcode = TX_UDPV6_PKT;
+ }
+ }
+
+ first_desc->tcp_hdr_offset += skb_transport_offset(skb);
+ first_desc->ip_hdr_offset += skb_network_offset(skb);
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+ if (!tso)
+ return;
+
+ /* For LSO, we need to copy the MAC/IP/TCP headers into
+ * the descriptor ring
+ */
+ producer = tx_ring->producer;
+ copied = 0;
+ offset = 2;
+
+ if (vlan_oob) {
+ /* Create a TSO vlan header template for firmware */
+
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
+ hdr_len + VLAN_HLEN);
+
+ vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
+ skb_copy_from_linear_data(skb, vh, 12);
+ vh->h_vlan_proto = htons(ETH_P_8021Q);
+ vh->h_vlan_TCI = htons(vid);
+ skb_copy_from_linear_data_offset(skb, 12,
+ (char *)vh + 16, copy_len - 16);
+
+ copied = copy_len - VLAN_HLEN;
+ offset = 0;
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ while (copied < hdr_len) {
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
+ (hdr_len - copied));
+
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ skb_copy_from_linear_data_offset(skb, copied,
+ (char *)hwdesc + offset, copy_len);
+
+ copied += copy_len;
+ offset = 0;
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ tx_ring->producer = producer;
+ barrier();
+}
+
+static int
+qlcnic_map_tx_skb(struct pci_dev *pdev,
+ struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
+{
+ struct qlcnic_skb_frag *nf;
+ struct skb_frag_struct *frag;
+ int i, nr_frags;
+ dma_addr_t map;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ nf = &pbuf->frag_array[0];
+
+ map = pci_map_single(pdev, skb->data,
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, map))
+ goto out_err;
+
+ nf->dma = map;
+ nf->length = skb_headlen(skb);
+
+ for (i = 0; i < nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ nf = &pbuf->frag_array[i+1];
+
+ map = pci_map_page(pdev, frag->page, frag->page_offset,
+ frag->size, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, map))
+ goto unwind;
+
+ nf->dma = map;
+ nf->length = frag->size;
+ }
+
+ return 0;
+
+unwind:
+ while (--i >= 0) {
+ nf = &pbuf->frag_array[i+1];
+ pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ }
+
+ nf = &pbuf->frag_array[0];
+ pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+
+out_err:
+ return -ENOMEM;
+}
+
+static inline void
+qlcnic_clear_cmddesc(u64 *desc)
+{
+ desc[0] = 0ULL;
+ desc[2] = 0ULL;
+}
+
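+/*
+ * Main transmit entry point: maps the skb head and fragments for DMA,
+ * packs up to four buffer addresses per command descriptor, runs the
+ * TSO/checksum setup and rings the Tx doorbell through
+ * qlcnic_update_cmd_producer().
+ */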
+netdev_tx_t
+qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+ struct qlcnic_cmd_buffer *pbuf;
+ struct qlcnic_skb_frag *buffrag;
+ struct cmd_desc_type0 *hwdesc, *first_desc;
+ struct pci_dev *pdev;
+ int i, k;
+
+ u32 producer;
+ int frag_count, no_of_desc;
+ u32 num_txd = tx_ring->num_desc;
+
+ frag_count = skb_shinfo(skb)->nr_frags + 1;
+
+ /* 4 fragments per cmd descriptor */
+ no_of_desc = (frag_count + 3) >> 2;
+
+ if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ producer = tx_ring->producer;
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+
+ pdev = adapter->pdev;
+
+ if (qlcnic_map_tx_skb(pdev, skb, pbuf))
+ goto drop_packet;
+
+ pbuf->skb = skb;
+ pbuf->frag_count = frag_count;
+
+ first_desc = hwdesc = &tx_ring->desc_head[producer];
+ qlcnic_clear_cmddesc((u64 *)hwdesc);
+
+ qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
+ qlcnic_set_tx_port(first_desc, adapter->portnum);
+
+ for (i = 0; i < frag_count; i++) {
+
+ k = i % 4;
+
+ if ((k == 0) && (i > 0)) {
+ /* move to next desc.*/
+ producer = get_next_index(producer, num_txd);
+ hwdesc = &tx_ring->desc_head[producer];
+ qlcnic_clear_cmddesc((u64 *)hwdesc);
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+ }
+
+ buffrag = &pbuf->frag_array[i];
+
+ hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
+ switch (k) {
+ case 0:
+ hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
+ break;
+ case 1:
+ hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
+ break;
+ case 2:
+ hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
+ break;
+ case 3:
+ hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
+ break;
+ }
+ }
+
+ tx_ring->producer = get_next_index(producer, num_txd);
+
+ qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
+
+ qlcnic_update_cmd_producer(adapter, tx_ring);
+
+ adapter->stats.txbytes += skb->len;
+ adapter->stats.xmitcalled++;
+
+ return NETDEV_TX_OK;
+
+drop_packet:
+ adapter->stats.txdropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 temp, temp_state, temp_val;
+ int rv = 0;
+
+ temp = QLCRD32(adapter, CRB_TEMP_STATE);
+
+ temp_state = qlcnic_get_temp_state(temp);
+ temp_val = qlcnic_get_temp_val(temp);
+
+ if (temp_state == QLCNIC_TEMP_PANIC) {
+ dev_err(&netdev->dev,
+ "Device temperature %d degrees C exceeds"
+ " maximum allowed. Hardware has been shut down.\n",
+ temp_val);
+ rv = 1;
+ } else if (temp_state == QLCNIC_TEMP_WARN) {
+ if (adapter->temp == QLCNIC_TEMP_NORMAL) {
+ dev_err(&netdev->dev,
+ "Device temperature %d degrees C "
+ "exceeds operating range."
+ " Immediate action needed.\n",
+ temp_val);
+ }
+ } else {
+ if (adapter->temp == QLCNIC_TEMP_WARN) {
+ dev_info(&netdev->dev,
+ "Device temperature is now %d degrees C"
+ " in normal range.\n", temp_val);
+ }
+ }
+ adapter->temp = temp_state;
+ return rv;
+}
+
+void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (adapter->ahw.linkup && !linkup) {
+ dev_info(&netdev->dev, "NIC Link is down\n");
+ adapter->ahw.linkup = 0;
+ if (netif_running(netdev)) {
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ } else if (!adapter->ahw.linkup && linkup) {
+ dev_info(&netdev->dev, "NIC Link is up\n");
+ adapter->ahw.linkup = 1;
+ if (netif_running(netdev)) {
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ }
+ }
+}
+
+static void qlcnic_tx_timeout(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ return;
+
+ dev_err(&netdev->dev, "transmit timeout, resetting.\n");
+ schedule_work(&adapter->tx_timeout_task);
+}
+
+static void qlcnic_tx_timeout_task(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter =
+ container_of(work, struct qlcnic_adapter, tx_timeout_task);
+
+ if (!netif_running(adapter->netdev))
+ return;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return;
+
+ if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
+ goto request_reset;
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ if (!qlcnic_reset_context(adapter)) {
+ adapter->netdev->trans_start = jiffies;
+ return;
+ }
+
+ /* context reset failed, fall through for fw reset */
+
+request_reset:
+ adapter->need_fw_reset = 1;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+}
+
+static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct net_device_stats *stats = &netdev->stats;
+
+ memset(stats, 0, sizeof(*stats));
+
+ stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
+ stats->tx_packets = adapter->stats.xmitfinished;
+ stats->rx_bytes = adapter->stats.rxbytes;
+ stats->tx_bytes = adapter->stats.txbytes;
+ stats->rx_dropped = adapter->stats.rxdropped;
+ stats->tx_dropped = adapter->stats.txdropped;
+
+ return stats;
+}
+
+static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+ u32 status;
+
+ status = readl(adapter->isr_int_vec);
+
+ if (!(status & adapter->int_vec_bit))
+ return IRQ_NONE;
+
+ /* check interrupt state machine, to be sure */
+ status = readl(adapter->crb_int_state_reg);
+ if (!ISR_LEGACY_INT_TRIGGERED(status))
+ return IRQ_NONE;
+
+ writel(0xffffffff, adapter->tgt_status_reg);
+ /* read twice to ensure write is flushed */
+ readl(adapter->isr_int_vec);
+ readl(adapter->isr_int_vec);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ goto done;
+ else if (adapter->flags & QLCNIC_MSI_ENABLED) {
+ writel(0xffffffff, adapter->tgt_status_reg);
+ goto done;
+ }
+
+ if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+done:
+ adapter->diag_cnt++;
+ qlcnic_enable_int(sds_ring);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+ napi_schedule(&sds_ring->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msi_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ /* clear interrupt */
+ writel(0xffffffff, adapter->tgt_status_reg);
+
+ napi_schedule(&sds_ring->napi);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msix_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+
+ napi_schedule(&sds_ring->napi);
+ return IRQ_HANDLED;
+}
+
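+/*
+ * Reclaim completed Tx buffers up to the consumer index reported by
+ * the hardware, unmap their DMA buffers and wake the Tx queue once
+ * enough descriptors are free again. Returns non-zero when the ring
+ * is fully cleaned or another context is already cleaning it.
+ */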
+static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
+{
+ u32 sw_consumer, hw_consumer;
+ int count = 0, i;
+ struct qlcnic_cmd_buffer *buffer;
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_skb_frag *frag;
+ int done;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ if (!spin_trylock(&adapter->tx_clean_lock))
+ return 1;
+
+ sw_consumer = tx_ring->sw_consumer;
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+
+ while (sw_consumer != hw_consumer) {
+ buffer = &tx_ring->cmd_buf_arr[sw_consumer];
+ if (buffer->skb) {
+ frag = &buffer->frag_array[0];
+ pci_unmap_single(pdev, frag->dma, frag->length,
+ PCI_DMA_TODEVICE);
+ frag->dma = 0ULL;
+ for (i = 1; i < buffer->frag_count; i++) {
+ frag++;
+ pci_unmap_page(pdev, frag->dma, frag->length,
+ PCI_DMA_TODEVICE);
+ frag->dma = 0ULL;
+ }
+
+ adapter->stats.xmitfinished++;
+ dev_kfree_skb_any(buffer->skb);
+ buffer->skb = NULL;
+ }
+
+ sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
+ if (++count >= MAX_STATUS_HANDLE)
+ break;
+ }
+
+ if (count && netif_running(netdev)) {
+ tx_ring->sw_consumer = sw_consumer;
+
+ smp_mb();
+
+ if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
+ __netif_tx_lock(tx_ring->txq, smp_processor_id());
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
+ netif_wake_queue(netdev);
+ adapter->tx_timeo_cnt = 0;
+ }
+ __netif_tx_unlock(tx_ring->txq);
+ }
+ }
+ /*
+ * If everything is freed up to consumer then check if the ring is full
+ * If the ring is full then check if more needs to be freed and
+ * schedule the call back again.
+ *
+ * This happens when there are 2 CPUs. One could be freeing and the
+ * other filling it. If the ring is full when we get out of here and
+ * the card has already interrupted the host then the host can miss the
+ * interrupt.
+ *
+ * There is still a possible race condition and the host could miss an
+ * interrupt. The card has to take care of this.
+ */
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+ done = (sw_consumer == hw_consumer);
+ spin_unlock(&adapter->tx_clean_lock);
+
+ return done;
+}
+
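+/*
+ * NAPI poll handler: clean the Tx ring, process up to 'budget' receive
+ * descriptors and re-enable the ring interrupt only when both Rx and
+ * Tx work are done and the device is still up.
+ */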
+static int qlcnic_poll(struct napi_struct *napi, int budget)
+{
+ struct qlcnic_host_sds_ring *sds_ring =
+ container_of(napi, struct qlcnic_host_sds_ring, napi);
+
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ int tx_complete;
+ int work_done;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter);
+
+ work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+ if ((work_done < budget) && tx_complete) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_int(sds_ring);
+ }
+
+ return work_done;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void qlcnic_poll_controller(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ disable_irq(adapter->irq);
+ qlcnic_intr(adapter->irq, adapter);
+ enable_irq(adapter->irq);
+}
+#endif
+
+static void
+qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
+{
+ u32 val;
+
+ WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
+ state != QLCNIC_DEV_NEED_QUISCENT);
+
+ if (qlcnic_api_lock(adapter))
+ return;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+
+ if (state == QLCNIC_DEV_NEED_RESET)
+ val |= ((u32)0x1 << (adapter->portnum * 4));
+ else if (state == QLCNIC_DEV_NEED_QUISCENT)
+ val |= ((u32)0x1 << ((adapter->portnum * 4) + 1));
+
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+}
+
+static int
+qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ if (qlcnic_api_lock(adapter))
+ return -EBUSY;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val &= ~((u32)0x3 << (adapter->portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+
+ return 0;
+}
+
+static void
+qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ if (qlcnic_api_lock(adapter))
+ goto err;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+ val &= ~((u32)0x1 << (adapter->portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
+
+ if (!(val & 0x11111111))
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val &= ~((u32)0x3 << (adapter->portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+err:
+ adapter->fw_fail_cnt = 0;
+ clear_bit(__QLCNIC_START_FW, &adapter->state);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+}
+
+static int
+qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
+{
+ int act, state;
+
+ state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+
+ if (((state & 0x11111111) == (act & 0x11111111)) ||
+ ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
+ return 0;
+ else
+ return 1;
+}
+
+static int
+qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
+{
+ u32 val, prev_state;
+ int cnt = 0;
+ int portnum = adapter->portnum;
+
+ if (qlcnic_api_lock(adapter))
+ return -1;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+ if (!(val & ((int)0x1 << (portnum * 4)))) {
+ val |= ((u32)0x1 << (portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
+ } else if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) {
+ goto start_fw;
+ }
+
+ prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ switch (prev_state) {
+ case QLCNIC_DEV_COLD:
+start_fw:
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITALIZING);
+ qlcnic_api_unlock(adapter);
+ return 1;
+
+ case QLCNIC_DEV_READY:
+ qlcnic_api_unlock(adapter);
+ return 0;
+
+ case QLCNIC_DEV_NEED_RESET:
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val |= ((u32)0x1 << (portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ break;
+
+ case QLCNIC_DEV_NEED_QUISCENT:
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val |= ((u32)0x1 << ((portnum * 4) + 1));
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ break;
+
+ case QLCNIC_DEV_FAILED:
+ qlcnic_api_unlock(adapter);
+ return -1;
+ }
+
+ qlcnic_api_unlock(adapter);
+ msleep(1000);
+ while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY) &&
+ ++cnt < 20)
+ msleep(1000);
+
+ if (cnt >= 20)
+ return -1;
+
+ if (qlcnic_api_lock(adapter))
+ return -1;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val &= ~((u32)0x3 << (portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+
+ return 0;
+}
+
+static void
+qlcnic_fwinit_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ int dev_state;
+
+ if (++adapter->fw_wait_cnt > FW_POLL_THRESH)
+ goto err_ret;
+
+ if (test_bit(__QLCNIC_START_FW, &adapter->state)) {
+
+ if (qlcnic_check_drv_state(adapter)) {
+ qlcnic_schedule_work(adapter,
+ qlcnic_fwinit_work, FW_POLL_DELAY);
+ return;
+ }
+
+ if (!qlcnic_start_firmware(adapter)) {
+ qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ return;
+ }
+
+ goto err_ret;
+ }
+
+ dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ switch (dev_state) {
+ case QLCNIC_DEV_READY:
+ if (!qlcnic_start_firmware(adapter)) {
+ qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ return;
+ }
+ case QLCNIC_DEV_FAILED:
+ break;
+
+ default:
+ qlcnic_schedule_work(adapter,
+ qlcnic_fwinit_work, 2 * FW_POLL_DELAY);
+ return;
+ }
+
+err_ret:
+ qlcnic_clr_all_drv_state(adapter);
+}
+
+static void
+qlcnic_detach_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ struct net_device *netdev = adapter->netdev;
+ u32 status;
+
+ netif_device_detach(netdev);
+
+ qlcnic_down(adapter, netdev);
+
+ rtnl_lock();
+ qlcnic_detach(adapter);
+ rtnl_unlock();
+
+ status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+
+ if (status & QLCNIC_RCODE_FATAL_ERROR)
+ goto err_ret;
+
+ if (adapter->temp == QLCNIC_TEMP_PANIC)
+ goto err_ret;
+
+ qlcnic_set_drv_state(adapter, adapter->dev_state);
+
+ adapter->fw_wait_cnt = 0;
+
+ qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
+
+ return;
+
+err_ret:
+ qlcnic_clr_all_drv_state(adapter);
+
+}
+
+static void
+qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+
+ if (qlcnic_api_lock(adapter))
+ return;
+
+ state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ if (state != QLCNIC_DEV_INITALIZING && state != QLCNIC_DEV_NEED_RESET) {
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
+ set_bit(__QLCNIC_START_FW, &adapter->state);
+ }
+
+ qlcnic_api_unlock(adapter);
+}
+
+static void
+qlcnic_schedule_work(struct qlcnic_adapter *adapter,
+ work_func_t func, int delay)
+{
+ INIT_DELAYED_WORK(&adapter->fw_work, func);
+ schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
+}
+
+static void
+qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
+{
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ msleep(10);
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+static void
+qlcnic_attach_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (err)
+ goto done;
+
+ err = qlcnic_up(adapter, netdev);
+ if (err) {
+ qlcnic_detach(adapter);
+ goto done;
+ }
+
+ qlcnic_config_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+
+done:
+ adapter->fw_fail_cnt = 0;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ if (!qlcnic_clr_drv_state(adapter))
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
+ FW_POLL_DELAY);
+}
+
+static int
+qlcnic_check_health(struct qlcnic_adapter *adapter)
+{
+ u32 state = 0, heartbit;
+ struct net_device *netdev = adapter->netdev;
+
+ if (qlcnic_check_temp(adapter))
+ goto detach;
+
+ if (adapter->need_fw_reset) {
+ qlcnic_dev_request_reset(adapter);
+ goto detach;
+ }
+
+ state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
+ adapter->need_fw_reset = 1;
+
+ heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ if (heartbit != adapter->heartbit) {
+ adapter->heartbit = heartbit;
+ adapter->fw_fail_cnt = 0;
+ if (adapter->need_fw_reset)
+ goto detach;
+ return 0;
+ }
+
+ if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
+ return 0;
+
+ qlcnic_dev_request_reset(adapter);
+
+ clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+
+ dev_info(&netdev->dev, "firmware hang detected\n");
+
+detach:
+ adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
+ QLCNIC_DEV_NEED_RESET;
+
+ if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
+ !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
+
+ return 1;
+}
+
+static void
+qlcnic_fw_poll_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ goto reschedule;
+
+
+ if (qlcnic_check_health(adapter))
+ return;
+
+reschedule:
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+}
+
+static ssize_t
+qlcnic_store_bridged_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long new;
+ int ret = -EINVAL;
+
+ if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
+ goto err_out;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ goto err_out;
+
+ if (strict_strtoul(buf, 2, &new))
+ goto err_out;
+
+ if (!qlcnic_config_bridged_mode(adapter, !!new))
+ ret = len;
+
+err_out:
+ return ret;
+}
+
+static ssize_t
+qlcnic_show_bridged_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int bridged_mode = 0;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
+
+ return sprintf(buf, "%d\n", bridged_mode);
+}
+
+static struct device_attribute dev_attr_bridged_mode = {
+ .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
+ .show = qlcnic_show_bridged_mode,
+ .store = qlcnic_store_bridged_mode,
+};
+
+static ssize_t
+qlcnic_store_diag_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long new;
+
+ if (strict_strtoul(buf, 2, &new))
+ return -EINVAL;
+
+ if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ adapter->flags ^= QLCNIC_DIAG_ENABLED;
+
+ return len;
+}
+
+static ssize_t
+qlcnic_show_diag_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n",
+ !!(adapter->flags & QLCNIC_DIAG_ENABLED));
+}
+
+static struct device_attribute dev_attr_diag_mode = {
+ .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
+ .show = qlcnic_show_diag_mode,
+ .store = qlcnic_store_diag_mode,
+};
+
+static int
+qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ return -EIO;
+
+ if ((size != 4) || (offset & 0x3))
+ return -EINVAL;
+
+ if (offset < QLCNIC_PCI_CRBSPACE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t
+qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ data = QLCRD32(adapter, offset);
+ memcpy(buf, &data, size);
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ memcpy(&data, buf, size);
+ QLCWR32(adapter, offset, data);
+ return size;
+}
+
+static int
+qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ return -EIO;
+
+ if ((size != 8) || (offset & 0x7))
+ return -EIO;
+
+ return 0;
+}
+
+static ssize_t
+qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
+ return -EIO;
+
+ memcpy(buf, &data, size);
+
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_write_mem(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ memcpy(&data, buf, size);
+
+ if (qlcnic_pci_mem_write_2M(adapter, offset, data))
+ return -EIO;
+
+ return size;
+}
+
+
+static struct bin_attribute bin_attr_crb = {
+ .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_crb,
+ .write = qlcnic_sysfs_write_crb,
+};
+
+static struct bin_attribute bin_attr_mem = {
+ .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_mem,
+ .write = qlcnic_sysfs_write_mem,
+};
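A minimal userspace sketch of how these diagnostic attributes might be exercised (the PCI device path and the CRB offset are illustrative only, not taken from the patch): diag_mode must first be written with 1 so the QLCNIC_DIAG_ENABLED flag is set, and CRB reads must be exactly 4 bytes at a 4-byte-aligned offset at or above QLCNIC_PCI_CRBSPACE.

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	const char *dev = "/sys/bus/pci/devices/0000:03:00.0"; /* hypothetical */
    	uint32_t val;
    	char path[128];
    	int fd;

    	/* Enable diag mode first. */
    	snprintf(path, sizeof(path), "%s/diag_mode", dev);
    	fd = open(path, O_WRONLY);
    	if (fd < 0 || write(fd, "1", 1) != 1)
    		return 1;
    	close(fd);

    	/* Read one 32-bit CRB word; the offset is made up for the example. */
    	snprintf(path, sizeof(path), "%s/crb", dev);
    	fd = open(path, O_RDONLY);
    	if (fd < 0 || pread(fd, &val, sizeof(val), 0x06000000) != sizeof(val))
    		return 1;
    	printf("crb word: 0x%08x\n", (unsigned int)val);
    	close(fd);
    	return 0;
    }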
+
+static void
+qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ if (device_create_file(dev, &dev_attr_bridged_mode))
+ dev_warn(dev,
+ "failed to create bridged_mode sysfs entry\n");
+}
+
+static void
+qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ device_remove_file(dev, &dev_attr_bridged_mode);
+}
+
+static void
+qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (device_create_file(dev, &dev_attr_diag_mode))
+ dev_info(dev, "failed to create diag_mode sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_crb))
+ dev_info(dev, "failed to create crb sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_mem))
+ dev_info(dev, "failed to create mem sysfs entry\n");
+}
+
+
+static void
+qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ device_remove_file(dev, &dev_attr_diag_mode);
+ device_remove_bin_file(dev, &bin_attr_crb);
+ device_remove_bin_file(dev, &bin_attr_mem);
+}
+
+#ifdef CONFIG_INET
+
+#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
+
+static int
+qlcnic_destip_supported(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw.cut_through)
+ return 0;
+
+ return 1;
+}
+
+static void
+qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
+{
+ struct in_device *indev;
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (!qlcnic_destip_supported(adapter))
+ return;
+
+ indev = in_dev_get(dev);
+ if (!indev)
+ return;
+
+ for_ifa(indev) {
+ switch (event) {
+ case NETDEV_UP:
+ qlcnic_config_ipaddr(adapter,
+ ifa->ifa_address, QLCNIC_IP_UP);
+ break;
+ case NETDEV_DOWN:
+ qlcnic_config_ipaddr(adapter,
+ ifa->ifa_address, QLCNIC_IP_DOWN);
+ break;
+ default:
+ break;
+ }
+ } endfor_ifa(indev);
+
+ in_dev_put(indev);
+ return;
+}
+
+static int qlcnic_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *dev = (struct net_device *)ptr;
+
+recheck:
+ if (dev == NULL)
+ goto done;
+
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ dev = vlan_dev_real_dev(dev);
+ goto recheck;
+ }
+
+ if (!is_qlcnic_netdev(dev))
+ goto done;
+
+ adapter = netdev_priv(dev);
+
+ if (!adapter)
+ goto done;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ goto done;
+
+ qlcnic_config_indev_addr(dev, event);
+done:
+ return NOTIFY_DONE;
+}
+
+static int
+qlcnic_inetaddr_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *dev;
+
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+
+ dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+
+recheck:
+ if (dev == NULL || !netif_running(dev))
+ goto done;
+
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ dev = vlan_dev_real_dev(dev);
+ goto recheck;
+ }
+
+ if (!is_qlcnic_netdev(dev))
+ goto done;
+
+ adapter = netdev_priv(dev);
+
+ if (!adapter || !qlcnic_destip_supported(adapter))
+ goto done;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ goto done;
+
+ switch (event) {
+ case NETDEV_UP:
+ qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
+ break;
+ case NETDEV_DOWN:
+ qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
+ break;
+ default:
+ break;
+ }
+
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block qlcnic_netdev_cb = {
+ .notifier_call = qlcnic_netdev_event,
+};
+
+static struct notifier_block qlcnic_inetaddr_cb = {
+ .notifier_call = qlcnic_inetaddr_event,
+};
+#else
+static void
+qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
+{ }
+#endif
+
+static struct pci_driver qlcnic_driver = {
+ .name = qlcnic_driver_name,
+ .id_table = qlcnic_pci_tbl,
+ .probe = qlcnic_probe,
+ .remove = __devexit_p(qlcnic_remove),
+#ifdef CONFIG_PM
+ .suspend = qlcnic_suspend,
+ .resume = qlcnic_resume,
+#endif
+ .shutdown = qlcnic_shutdown
+};
+
+static int __init qlcnic_init_module(void)
+{
+
+ printk(KERN_INFO "%s\n", qlcnic_driver_string);
+
+#ifdef CONFIG_INET
+ register_netdevice_notifier(&qlcnic_netdev_cb);
+ register_inetaddr_notifier(&qlcnic_inetaddr_cb);
+#endif
+
+
+ return pci_register_driver(&qlcnic_driver);
+}
+
+module_init(qlcnic_init_module);
+
+static void __exit qlcnic_exit_module(void)
+{
+
+ pci_unregister_driver(&qlcnic_driver);
+
+#ifdef CONFIG_INET
+ unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
+ unregister_netdevice_notifier(&qlcnic_netdev_cb);
+#endif
+}
+
+module_exit(qlcnic_exit_module);
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 862c1aaf3860..57d135e3bfaf 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -19,14 +19,6 @@
#define DRV_VERSION "v1.00.00.23.00.00-01"
#define PFX "qlge: "
-#define QPRINTK(qdev, nlevel, klevel, fmt, args...) \
- do { \
- if (!((qdev)->msg_enable & NETIF_MSG_##nlevel)) \
- ; \
- else \
- dev_printk(KERN_##klevel, &((qdev)->pdev->dev), \
- "%s: " fmt, __func__, ##args); \
- } while (0)
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
@@ -54,12 +46,8 @@
#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
-#define SMALL_BUFFER_SIZE 512
-#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
#define LARGE_BUFFER_MAX_SIZE 8192
#define LARGE_BUFFER_MIN_SIZE 2048
-#define MAX_SPLIT_SIZE 1023
-#define QLGE_SB_PAD 32
#define MAX_CQ 128
#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
@@ -79,15 +67,43 @@
#define TX_DESC_PER_OAL 0
#endif
+/* Word shifting for converting a 64-bit
+ * address into a series of 16-bit words
+ * and 32-bit dwords. This is used for some
+ * MPI firmware mailbox commands.
+ */
+#define LSW(x) ((u16)(x))
+#define MSW(x) ((u16)((u32)(x) >> 16))
+#define LSD(x) ((u32)((u64)(x)))
+#define MSD(x) ((u32)((((u64)(x)) >> 32)))
+
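As a rough illustration (not part of the patch, and the address value is made up), these helpers break a 64-bit DMA address into the pieces a mailbox command expects:

    u64 addr = 0x0000001234abcd00ULL;	/* hypothetical DMA address */
    u32 addr_lo = LSD(addr);		/* 0x34abcd00: low 32 bits */
    u32 addr_hi = MSD(addr);		/* 0x00000012: high 32 bits */
    u16 mbx_word0 = LSW(addr_lo);	/* 0xcd00: low 16 bits of addr_lo */
    u16 mbx_word1 = MSW(addr_lo);	/* 0x34ab: high 16 bits of addr_lo */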
/* MPI test register definitions. This register
* is used for determining alternate NIC function's
* PCI->func number.
*/
enum {
MPI_TEST_FUNC_PORT_CFG = 0x1002,
+ MPI_TEST_FUNC_PRB_CTL = 0x100e,
+ MPI_TEST_FUNC_PRB_EN = 0x18a20000,
+ MPI_TEST_FUNC_RST_STS = 0x100a,
+ MPI_TEST_FUNC_RST_FRC = 0x00000003,
+ MPI_TEST_NIC_FUNC_MASK = 0x00000007,
+ MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0),
+ MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e,
MPI_TEST_NIC1_FUNC_SHIFT = 1,
+ MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4),
+ MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0,
MPI_TEST_NIC2_FUNC_SHIFT = 5,
- MPI_TEST_NIC_FUNC_MASK = 0x00000007,
+ MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8),
+ MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00,
+ MPI_TEST_FC1_FUNCTION_SHIFT = 9,
+ MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12),
+ MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000,
+ MPI_TEST_FC2_FUNCTION_SHIFT = 13,
+
+ MPI_NIC_READ = 0x00000000,
+ MPI_NIC_REG_BLOCK = 0x00020000,
+ MPI_NIC_FUNCTION_SHIFT = 6,
};
/*
@@ -468,7 +484,7 @@ enum {
MDIO_PORT = 0x00000440,
MDIO_STATUS = 0x00000450,
- /* XGMAC AUX statistics registers */
+ XGMAC_REGISTER_END = 0x00000740,
};
/*
@@ -509,6 +525,7 @@ enum {
enum {
MAC_ADDR_IDX_SHIFT = 4,
MAC_ADDR_TYPE_SHIFT = 16,
+ MAC_ADDR_TYPE_COUNT = 10,
MAC_ADDR_TYPE_MASK = 0x000f0000,
MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
@@ -526,6 +543,30 @@ enum {
MAC_ADDR_MR = (1 << 30),
MAC_ADDR_MW = (1 << 31),
MAX_MULTICAST_ENTRIES = 32,
+
+ /* Entry count and words per entry
+ * for each address type in the filter.
+ */
+ MAC_ADDR_MAX_CAM_ENTRIES = 512,
+ MAC_ADDR_MAX_CAM_WCOUNT = 3,
+ MAC_ADDR_MAX_MULTICAST_ENTRIES = 32,
+ MAC_ADDR_MAX_MULTICAST_WCOUNT = 2,
+ MAC_ADDR_MAX_VLAN_ENTRIES = 4096,
+ MAC_ADDR_MAX_VLAN_WCOUNT = 1,
+ MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096,
+ MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1,
+ MAC_ADDR_MAX_FC_MAC_ENTRIES = 4,
+ MAC_ADDR_MAX_FC_MAC_WCOUNT = 2,
+ MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8,
+ MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2,
+ MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16,
+ MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1,
+ MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4,
+ MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1,
+ MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4,
+ MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4,
+ MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4,
+ MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1,
};
/*
@@ -596,6 +637,7 @@ enum {
enum {
RT_IDX_IDX_SHIFT = 8,
RT_IDX_TYPE_MASK = 0x000f0000,
+ RT_IDX_TYPE_SHIFT = 16,
RT_IDX_TYPE_RT = 0x00000000,
RT_IDX_TYPE_RT_INV = 0x00010000,
RT_IDX_TYPE_NICQ = 0x00020000,
@@ -664,7 +706,89 @@ enum {
RT_IDX_UNUSED013 = 13,
RT_IDX_UNUSED014 = 14,
RT_IDX_PROMISCUOUS_SLOT = 15,
- RT_IDX_MAX_SLOTS = 16,
+ RT_IDX_MAX_RT_SLOTS = 8,
+ RT_IDX_MAX_NIC_SLOTS = 16,
+};
+
+/*
+ * Serdes Address Register (XG_SERDES_ADDR) bit definitions.
+ */
+enum {
+ XG_SERDES_ADDR_RDY = (1 << 31),
+ XG_SERDES_ADDR_R = (1 << 30),
+
+ XG_SERDES_ADDR_STS = 0x00001E06,
+ XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005,
+ XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a,
+ XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001,
+
+ /* Serdes coredump definitions. */
+ XG_SERDES_XAUI_AN_START = 0x00000000,
+ XG_SERDES_XAUI_AN_END = 0x00000034,
+ XG_SERDES_XAUI_HSS_PCS_START = 0x00000800,
+ XG_SERDES_XAUI_HSS_PCS_END = 0x0000880,
+ XG_SERDES_XFI_AN_START = 0x00001000,
+ XG_SERDES_XFI_AN_END = 0x00001034,
+ XG_SERDES_XFI_TRAIN_START = 0x10001050,
+ XG_SERDES_XFI_TRAIN_END = 0x1000107C,
+ XG_SERDES_XFI_HSS_PCS_START = 0x00001800,
+ XG_SERDES_XFI_HSS_PCS_END = 0x00001838,
+ XG_SERDES_XFI_HSS_TX_START = 0x00001c00,
+ XG_SERDES_XFI_HSS_TX_END = 0x00001c1f,
+ XG_SERDES_XFI_HSS_RX_START = 0x00001c40,
+ XG_SERDES_XFI_HSS_RX_END = 0x00001c5f,
+ XG_SERDES_XFI_HSS_PLL_START = 0x00001e00,
+ XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f,
+};
+
+/*
+ * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions.
+ */
+enum {
+ PRB_MX_ADDR_ARE = (1 << 16),
+ PRB_MX_ADDR_UP = (1 << 15),
+ PRB_MX_ADDR_SWP = (1 << 14),
+
+ /* Module select values. */
+ PRB_MX_ADDR_MAX_MODS = 21,
+ PRB_MX_ADDR_MOD_SEL_SHIFT = 9,
+ PRB_MX_ADDR_MOD_SEL_TBD = 0,
+ PRB_MX_ADDR_MOD_SEL_IDE1 = 1,
+ PRB_MX_ADDR_MOD_SEL_IDE2 = 2,
+ PRB_MX_ADDR_MOD_SEL_FRB = 3,
+ PRB_MX_ADDR_MOD_SEL_ODE1 = 4,
+ PRB_MX_ADDR_MOD_SEL_ODE2 = 5,
+ PRB_MX_ADDR_MOD_SEL_DA1 = 6,
+ PRB_MX_ADDR_MOD_SEL_DA2 = 7,
+ PRB_MX_ADDR_MOD_SEL_IMP1 = 8,
+ PRB_MX_ADDR_MOD_SEL_IMP2 = 9,
+ PRB_MX_ADDR_MOD_SEL_OMP1 = 10,
+ PRB_MX_ADDR_MOD_SEL_OMP2 = 11,
+ PRB_MX_ADDR_MOD_SEL_ORS1 = 12,
+ PRB_MX_ADDR_MOD_SEL_ORS2 = 13,
+ PRB_MX_ADDR_MOD_SEL_REG = 14,
+ PRB_MX_ADDR_MOD_SEL_MAC1 = 16,
+ PRB_MX_ADDR_MOD_SEL_MAC2 = 17,
+ PRB_MX_ADDR_MOD_SEL_VQM1 = 18,
+ PRB_MX_ADDR_MOD_SEL_VQM2 = 19,
+ PRB_MX_ADDR_MOD_SEL_MOP = 20,
+ /* Bit fields indicating which modules
+ * are valid for each clock domain.
+ */
+ PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7,
+ PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1,
+ PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309,
+ PRB_MX_ADDR_VALID_FC_MOD = 0x00003001,
+ PRB_MX_ADDR_VALID_TOTAL = 34,
+
+ /* Clock domain values. */
+ PRB_MX_ADDR_CLOCK_SHIFT = 6,
+ PRB_MX_ADDR_SYS_CLOCK = 0,
+ PRB_MX_ADDR_PCI_CLOCK = 2,
+ PRB_MX_ADDR_FC_CLOCK = 5,
+ PRB_MX_ADDR_XGM_CLOCK = 6,
+
+ PRB_MX_ADDR_MAX_MUX = 64,
};
/*
@@ -737,6 +861,21 @@ enum {
PRB_MX_DATA = 0xfc, /* Use semaphore */
};
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#define SMALL_BUFFER_SIZE 256
+#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE
+#define SPLT_SETTING FSC_DBRST_1024
+#define SPLT_LEN 0
+#define QLGE_SB_PAD 0
+#else
+#define SMALL_BUFFER_SIZE 512
+#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
+#define SPLT_SETTING FSC_SH
+#define SPLT_LEN (SPLT_HDR_EP | \
+ min(SMALL_BUF_MAP_SIZE, 1023))
+#define QLGE_SB_PAD 32
+#endif
+
/*
* CAM output format.
*/
@@ -1421,7 +1560,7 @@ struct nic_stats {
u64 rx_nic_fifo_drop;
};
-/* Address/Length pairs for the coredump. */
+/* Firmware coredump internal register address/length pairs. */
enum {
MPI_CORE_REGS_ADDR = 0x00030000,
MPI_CORE_REGS_CNT = 127,
@@ -1476,7 +1615,7 @@ struct mpi_coredump_segment_header {
u8 description[16];
};
-/* Reg dump segment numbers. */
+/* Firmware coredump header segment numbers. */
enum {
CORE_SEG_NUM = 1,
TEST_LOGIC_SEG_NUM = 2,
@@ -1527,6 +1666,67 @@ enum {
};
+/* There are 64 generic NIC registers. */
+#define NIC_REGS_DUMP_WORD_COUNT 64
+/* XGMAC word count. */
+#define XGMAC_DUMP_WORD_COUNT (XGMAC_REGISTER_END / 4)
+/* Word counts for the SERDES blocks. */
+#define XG_SERDES_XAUI_AN_COUNT 14
+#define XG_SERDES_XAUI_HSS_PCS_COUNT 33
+#define XG_SERDES_XFI_AN_COUNT 14
+#define XG_SERDES_XFI_TRAIN_COUNT 12
+#define XG_SERDES_XFI_HSS_PCS_COUNT 15
+#define XG_SERDES_XFI_HSS_TX_COUNT 32
+#define XG_SERDES_XFI_HSS_RX_COUNT 32
+#define XG_SERDES_XFI_HSS_PLL_COUNT 32
+
+/* There are 2 CNA ETS and 8 NIC ETS registers. */
+#define ETS_REGS_DUMP_WORD_COUNT 10
+
+/* Each probe mux entry stores the probe type plus 64 entries
+ * that are each 64 bits in length. There are a total of
+ * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes.
+ */
+#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2))
+#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \
+ PRB_MX_ADDR_VALID_TOTAL)
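With the constants defined above (PRB_MX_ADDR_MAX_MUX = 64, PRB_MX_ADDR_VALID_TOTAL = 34), this works out to 1 + 64 * 2 = 129 words per probe block and 129 * 34 = 4386 words for the whole probe dump.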
+/* Each routing entry consists of 4 32-bit words.
+ * They are route type, index, index word, and result.
+ * There are 2 route blocks with 8 entries each and
+ * 2 NIC blocks with 16 entries each.
+ * The total is 48 entries with 4 words each.
+ */
+#define RT_IDX_DUMP_ENTRIES 48
+#define RT_IDX_DUMP_WORDS_PER_ENTRY 4
+#define RT_IDX_DUMP_TOT_WORDS (RT_IDX_DUMP_ENTRIES * \
+ RT_IDX_DUMP_WORDS_PER_ENTRY)
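That is, 2 * 8 + 2 * 16 = 48 routing entries of 4 words each, or 48 * 4 = 192 words of routing data in the dump.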
+/* There are 10 address blocks in the filter, each with
+ * different entry counts and different word-count-per-entry.
+ */
+#define MAC_ADDR_DUMP_ENTRIES \
+ ((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \
+ (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \
+ (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \
+ (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \
+ (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT))
+#define MAC_ADDR_DUMP_WORDS_PER_ENTRY 2
+#define MAC_ADDR_DUMP_TOT_WORDS (MAC_ADDR_DUMP_ENTRIES * \
+ MAC_ADDR_DUMP_WORDS_PER_ENTRY)
+/* Maximum of 4 functions whose semaphore registers are
+ * in the coredump.
+ */
+#define MAX_SEMAPHORE_FUNCTIONS 4
+/* Defines for accessing the MPI shadow registers. */
+#define RISC_124 0x0003007c
+#define RISC_127 0x0003007f
+#define SHADOW_OFFSET 0xb0000000
+#define SHADOW_REG_SHIFT 20
+
struct ql_nic_misc {
u32 rx_ring_count;
u32 tx_ring_count;
@@ -1568,6 +1768,199 @@ struct ql_reg_dump {
u32 ets[8+2];
};
+struct ql_mpi_coredump {
+ /* segment 0 */
+ struct mpi_coredump_global_header mpi_global_header;
+
+ /* segment 1 */
+ struct mpi_coredump_segment_header core_regs_seg_hdr;
+ u32 mpi_core_regs[MPI_CORE_REGS_CNT];
+ u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT];
+
+ /* segment 2 */
+ struct mpi_coredump_segment_header test_logic_regs_seg_hdr;
+ u32 test_logic_regs[TEST_REGS_CNT];
+
+ /* segment 3 */
+ struct mpi_coredump_segment_header rmii_regs_seg_hdr;
+ u32 rmii_regs[RMII_REGS_CNT];
+
+ /* segment 4 */
+ struct mpi_coredump_segment_header fcmac1_regs_seg_hdr;
+ u32 fcmac1_regs[FCMAC_REGS_CNT];
+
+ /* segment 5 */
+ struct mpi_coredump_segment_header fcmac2_regs_seg_hdr;
+ u32 fcmac2_regs[FCMAC_REGS_CNT];
+
+ /* segment 6 */
+ struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr;
+ u32 fc1_mbx_regs[FC_MBX_REGS_CNT];
+
+ /* segment 7 */
+ struct mpi_coredump_segment_header ide_regs_seg_hdr;
+ u32 ide_regs[IDE_REGS_CNT];
+
+ /* segment 8 */
+ struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr;
+ u32 nic1_mbx_regs[NIC_MBX_REGS_CNT];
+
+ /* segment 9 */
+ struct mpi_coredump_segment_header smbus_regs_seg_hdr;
+ u32 smbus_regs[SMBUS_REGS_CNT];
+
+ /* segment 10 */
+ struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr;
+ u32 fc2_mbx_regs[FC_MBX_REGS_CNT];
+
+ /* segment 11 */
+ struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr;
+ u32 nic2_mbx_regs[NIC_MBX_REGS_CNT];
+
+ /* segment 12 */
+ struct mpi_coredump_segment_header i2c_regs_seg_hdr;
+ u32 i2c_regs[I2C_REGS_CNT];
+ /* segment 13 */
+ struct mpi_coredump_segment_header memc_regs_seg_hdr;
+ u32 memc_regs[MEMC_REGS_CNT];
+
+ /* segment 14 */
+ struct mpi_coredump_segment_header pbus_regs_seg_hdr;
+ u32 pbus_regs[PBUS_REGS_CNT];
+
+ /* segment 15 */
+ struct mpi_coredump_segment_header mde_regs_seg_hdr;
+ u32 mde_regs[MDE_REGS_CNT];
+
+ /* segment 16 */
+ struct mpi_coredump_segment_header nic_regs_seg_hdr;
+ u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT];
+
+ /* segment 17 */
+ struct mpi_coredump_segment_header nic2_regs_seg_hdr;
+ u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT];
+
+ /* segment 18 */
+ struct mpi_coredump_segment_header xgmac1_seg_hdr;
+ u32 xgmac1[XGMAC_DUMP_WORD_COUNT];
+
+ /* segment 19 */
+ struct mpi_coredump_segment_header xgmac2_seg_hdr;
+ u32 xgmac2[XGMAC_DUMP_WORD_COUNT];
+
+ /* segment 20 */
+ struct mpi_coredump_segment_header code_ram_seg_hdr;
+ u32 code_ram[CODE_RAM_CNT];
+
+ /* segment 21 */
+ struct mpi_coredump_segment_header memc_ram_seg_hdr;
+ u32 memc_ram[MEMC_RAM_CNT];
+
+ /* segment 22 */
+ struct mpi_coredump_segment_header xaui_an_hdr;
+ u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT];
+
+ /* segment 23 */
+ struct mpi_coredump_segment_header xaui_hss_pcs_hdr;
+ u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
+
+ /* segment 24 */
+ struct mpi_coredump_segment_header xfi_an_hdr;
+ u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT];
+
+ /* segment 25 */
+ struct mpi_coredump_segment_header xfi_train_hdr;
+ u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
+
+ /* segment 26 */
+ struct mpi_coredump_segment_header xfi_hss_pcs_hdr;
+ u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
+
+ /* segment 27 */
+ struct mpi_coredump_segment_header xfi_hss_tx_hdr;
+ u32 serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
+
+ /* segment 28 */
+ struct mpi_coredump_segment_header xfi_hss_rx_hdr;
+ u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
+
+ /* segment 29 */
+ struct mpi_coredump_segment_header xfi_hss_pll_hdr;
+ u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
+
+ /* segment 30 */
+ struct mpi_coredump_segment_header misc_nic_seg_hdr;
+ struct ql_nic_misc misc_nic_info;
+
+ /* segment 31 */
+ /* one interrupt state for each CQ */
+ struct mpi_coredump_segment_header intr_states_seg_hdr;
+ u32 intr_states[MAX_RX_RINGS];
+
+ /* segment 32 */
+ /* 3 cam words each for 16 unicast,
+ * 2 cam words for each of 32 multicast.
+ */
+ struct mpi_coredump_segment_header cam_entries_seg_hdr;
+ u32 cam_entries[(16 * 3) + (32 * 3)];
+
+ /* segment 33 */
+ struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
+ u32 nic_routing_words[16];
+ /* segment 34 */
+ struct mpi_coredump_segment_header ets_seg_hdr;
+ u32 ets[ETS_REGS_DUMP_WORD_COUNT];
+
+ /* segment 35 */
+ struct mpi_coredump_segment_header probe_dump_seg_hdr;
+ u32 probe_dump[PRB_MX_DUMP_TOT_COUNT];
+
+ /* segment 36 */
+ struct mpi_coredump_segment_header routing_reg_seg_hdr;
+ u32 routing_regs[RT_IDX_DUMP_TOT_WORDS];
+
+ /* segment 37 */
+ struct mpi_coredump_segment_header mac_prot_reg_seg_hdr;
+ u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS];
+
+ /* segment 38 */
+ struct mpi_coredump_segment_header xaui2_an_hdr;
+ u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT];
+
+ /* segment 39 */
+ struct mpi_coredump_segment_header xaui2_hss_pcs_hdr;
+ u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
+
+ /* segment 40 */
+ struct mpi_coredump_segment_header xfi2_an_hdr;
+ u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT];
+
+ /* segment 41 */
+ struct mpi_coredump_segment_header xfi2_train_hdr;
+ u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
+
+ /* segment 42 */
+ struct mpi_coredump_segment_header xfi2_hss_pcs_hdr;
+ u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
+
+ /* segment 43 */
+ struct mpi_coredump_segment_header xfi2_hss_tx_hdr;
+ u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
+
+ /* segment 44 */
+ struct mpi_coredump_segment_header xfi2_hss_rx_hdr;
+ u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
+
+ /* segment 45 */
+ struct mpi_coredump_segment_header xfi2_hss_pll_hdr;
+ u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
+
+ /* segment 50 */
+ /* semaphore register for all 5 functions */
+ struct mpi_coredump_segment_header sem_regs_seg_hdr;
+ u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS];
+};
+
/*
* intr_context structure is used during initialization
* to hook the interrupts. It is also used in a single
@@ -1603,6 +1996,8 @@ enum {
QL_CAM_RT_SET = 8,
QL_SELFTEST = 9,
QL_LB_LINK_UP = 10,
+ QL_FRC_COREDUMP = 11,
+ QL_EEH_FATAL = 12,
};
/* link_status bit definitions */
@@ -1724,6 +2119,8 @@ struct ql_adapter {
u32 port_link_up;
u32 port_init;
u32 link_status;
+ struct ql_mpi_coredump *mpi_coredump;
+ u32 core_is_dumped;
u32 link_config;
u32 led_config;
u32 max_frame_size;
@@ -1736,9 +2133,11 @@ struct ql_adapter {
struct delayed_work mpi_work;
struct delayed_work mpi_port_cfg_work;
struct delayed_work mpi_idc_work;
+ struct delayed_work mpi_core_to_log;
struct completion ide_completion;
struct nic_operations *nic_ops;
u16 device_id;
+ struct timer_list timer;
atomic_t lb_count;
};
@@ -1807,6 +2206,7 @@ extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
void ql_queue_fw_error(struct ql_adapter *qdev);
void ql_mpi_work(struct work_struct *work);
void ql_mpi_reset_work(struct work_struct *work);
+void ql_mpi_core_to_log(struct work_struct *work);
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
void ql_queue_asic_error(struct ql_adapter *qdev);
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
@@ -1817,6 +2217,15 @@ void ql_mpi_port_cfg_work(struct work_struct *work);
int ql_mb_get_fw_state(struct ql_adapter *qdev);
int ql_cam_route_initialize(struct ql_adapter *qdev);
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
+int ql_unpause_mpi_risc(struct ql_adapter *qdev);
+int ql_pause_mpi_risc(struct ql_adapter *qdev);
+int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
+ u32 ram_addr, int word_count);
+int ql_core_dump(struct ql_adapter *qdev,
+ struct ql_mpi_coredump *mpi_coredump);
+int ql_mb_sys_err(struct ql_adapter *qdev);
int ql_mb_about_fw(struct ql_adapter *qdev);
int ql_wol(struct ql_adapter *qdev);
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
@@ -1833,6 +2242,7 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
struct ql_reg_dump *mpi_coredump);
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
+int ql_own_firmware(struct ql_adapter *qdev);
int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
#if 1
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 9f58c4710761..ff8550d2ca82 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1,5 +1,405 @@
#include "qlge.h"
+/* Read a NIC register from the alternate function. */
+static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
+ u32 reg)
+{
+ u32 register_to_read;
+ u32 reg_val;
+ unsigned int status = 0;
+
+ register_to_read = MPI_NIC_REG_BLOCK
+ | MPI_NIC_READ
+ | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
+ | reg;
+ status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
+ if (status != 0)
+ return 0xffffffff;
+
+ return reg_val;
+}
+
+/* Write a NIC register of the alternate function. */
+static int ql_write_other_func_reg(struct ql_adapter *qdev,
+ u32 reg, u32 reg_val)
+{
+ u32 register_to_read;
+ int status = 0;
+
+ register_to_read = MPI_NIC_REG_BLOCK
+ | MPI_NIC_READ
+ | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
+ | reg;
+ status = ql_write_mpi_reg(qdev, register_to_read, reg_val);
+
+ return status;
+}
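Note that callers in this file pass the NIC register's byte offset divided by 4 (for example XG_SERDES_ADDR / 4 and XGMAC_ADDR / 4), so the reg argument to these two helpers is a dword index that gets OR'd into the MPI proxy address along with the alternate function number. Dumping the alternate function's copy of a register therefore looks like:

    u32 val = ql_read_other_func_reg(qdev, XG_SERDES_ADDR / 4);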
+
+static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
+ u32 bit, u32 err_bit)
+{
+ u32 temp;
+ int count = 10;
+
+ while (count) {
+ temp = ql_read_other_func_reg(qdev, reg);
+
+ /* check for errors */
+ if (temp & err_bit)
+ return -1;
+ else if (temp & bit)
+ return 0;
+ mdelay(10);
+ count--;
+ }
+ return -1;
+}
+
+static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
+ u32 *data)
+{
+ int status;
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
+ XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* set up for reg read */
+ ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
+ XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* get the data */
+ *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
+exit:
+ return status;
+}
+
+/* Read out the SERDES registers */
+static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data)
+{
+ int status;
+
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* set up for reg read */
+ ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
+
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* get the data */
+ *data = ql_read32(qdev, XG_SERDES_DATA);
+exit:
+ return status;
+}
+
+static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
+ u32 *direct_ptr, u32 *indirect_ptr,
+ unsigned int direct_valid, unsigned int indirect_valid)
+{
+ unsigned int status;
+
+ status = 1;
+ if (direct_valid)
+ status = ql_read_serdes_reg(qdev, addr, direct_ptr);
+ /* Dead fill any failures or invalids. */
+ if (status)
+ *direct_ptr = 0xDEADBEEF;
+
+ status = 1;
+ if (indirect_valid)
+ status = ql_read_other_func_serdes_reg(
+ qdev, addr, indirect_ptr);
+ /* Dead fill any failures or invalids. */
+ if (status)
+ *indirect_ptr = 0xDEADBEEF;
+}
+
+static int ql_get_serdes_regs(struct ql_adapter *qdev,
+ struct ql_mpi_coredump *mpi_coredump)
+{
+ int status;
+ unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
+ unsigned int xaui_indirect_valid, i;
+ u32 *direct_ptr, temp;
+ u32 *indirect_ptr;
+
+ xfi_direct_valid = xfi_indirect_valid = 0;
+ xaui_direct_valid = xaui_indirect_valid = 1;
+
+ /* The XAUI needs to be read out per port */
+ if (qdev->func & 1) {
+ /* We are NIC 2 */
+ status = ql_read_other_func_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ if (status)
+ temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+ if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+ XG_SERDES_ADDR_XAUI_PWR_DOWN)
+ xaui_indirect_valid = 0;
+
+ status = ql_read_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ if (status)
+ temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+
+ if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+ XG_SERDES_ADDR_XAUI_PWR_DOWN)
+ xaui_direct_valid = 0;
+ } else {
+ /* We are NIC 1 */
+ status = ql_read_other_func_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ if (status)
+ temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+ if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+ XG_SERDES_ADDR_XAUI_PWR_DOWN)
+ xaui_indirect_valid = 0;
+
+ status = ql_read_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ if (status)
+ temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+ if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+ XG_SERDES_ADDR_XAUI_PWR_DOWN)
+ xaui_direct_valid = 0;
+ }
+
+ /*
+ * The XFI register is shared, so we only need to read one
+ * function and then check the bits.
+ */
+ status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
+ if (status)
+ temp = 0;
+
+ if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
+ XG_SERDES_ADDR_XFI1_PWR_UP) {
+ /* now see if i'm NIC 1 or NIC 2 */
+ if (qdev->func & 1)
+ /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
+ xfi_indirect_valid = 1;
+ else
+ xfi_direct_valid = 1;
+ }
+ if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
+ XG_SERDES_ADDR_XFI2_PWR_UP) {
+ /* now see if i'm NIC 1 or NIC 2 */
+ if (qdev->func & 1)
+ /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
+ xfi_direct_valid = 1;
+ else
+ xfi_indirect_valid = 1;
+ }
+
+ /* Get XAUI_AN register block. */
+ if (qdev->func & 1) {
+ /* Function 2 is direct */
+ direct_ptr = mpi_coredump->serdes2_xaui_an;
+ indirect_ptr = mpi_coredump->serdes_xaui_an;
+ } else {
+ /* Function 1 is direct */
+ direct_ptr = mpi_coredump->serdes_xaui_an;
+ indirect_ptr = mpi_coredump->serdes2_xaui_an;
+ }
+
+ for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xaui_direct_valid, xaui_indirect_valid);
+
+ /* Get XAUI_HSS_PCS register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xaui_hss_pcs;
+ indirect_ptr =
+ mpi_coredump->serdes_xaui_hss_pcs;
+ } else {
+ direct_ptr =
+ mpi_coredump->serdes_xaui_hss_pcs;
+ indirect_ptr =
+ mpi_coredump->serdes2_xaui_hss_pcs;
+ }
+
+ for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xaui_direct_valid, xaui_indirect_valid);
+
+ /* Get XAUI_XFI_AN register block. */
+ if (qdev->func & 1) {
+ direct_ptr = mpi_coredump->serdes2_xfi_an;
+ indirect_ptr = mpi_coredump->serdes_xfi_an;
+ } else {
+ direct_ptr = mpi_coredump->serdes_xfi_an;
+ indirect_ptr = mpi_coredump->serdes2_xfi_an;
+ }
+
+ for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_TRAIN register block. */
+ if (qdev->func & 1) {
+ direct_ptr = mpi_coredump->serdes2_xfi_train;
+ indirect_ptr =
+ mpi_coredump->serdes_xfi_train;
+ } else {
+ direct_ptr = mpi_coredump->serdes_xfi_train;
+ indirect_ptr =
+ mpi_coredump->serdes2_xfi_train;
+ }
+
+ for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_HSS_PCS register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xfi_hss_pcs;
+ indirect_ptr =
+ mpi_coredump->serdes_xfi_hss_pcs;
+ } else {
+ direct_ptr =
+ mpi_coredump->serdes_xfi_hss_pcs;
+ indirect_ptr =
+ mpi_coredump->serdes2_xfi_hss_pcs;
+ }
+
+ for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_HSS_TX register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xfi_hss_tx;
+ indirect_ptr =
+ mpi_coredump->serdes_xfi_hss_tx;
+ } else {
+ direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
+ indirect_ptr =
+ mpi_coredump->serdes2_xfi_hss_tx;
+ }
+ for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_HSS_RX register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xfi_hss_rx;
+ indirect_ptr =
+ mpi_coredump->serdes_xfi_hss_rx;
+ } else {
+ direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
+ indirect_ptr =
+ mpi_coredump->serdes2_xfi_hss_rx;
+ }
+
+ for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+
+ /* Get XAUI_XFI_HSS_PLL register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xfi_hss_pll;
+ indirect_ptr =
+ mpi_coredump->serdes_xfi_hss_pll;
+ } else {
+ direct_ptr =
+ mpi_coredump->serdes_xfi_hss_pll;
+ indirect_ptr =
+ mpi_coredump->serdes2_xfi_hss_pll;
+ }
+ for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+ return 0;
+}
+
+static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
+ u32 *data)
+{
+ int status = 0;
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
+ XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ if (status)
+ goto exit;
+
+ /* set up for reg read */
+ ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
+ XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ if (status)
+ goto exit;
+
+ /* get the data */
+ *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
+exit:
+ return status;
+}
+
+/* Read the 400 xgmac control/statistics registers
+ * skipping unused locations.
+ */
+static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf,
+ unsigned int other_function)
+{
+ int status = 0;
+ int i;
+
+ for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
+ /* We're reading 400 xgmac registers, but we filter out
+ * several locations that are non-responsive to reads.
+ */
+ if ((i == 0x00000114) ||
+ (i == 0x00000118) ||
+ (i == 0x0000013c) ||
+ (i == 0x00000140) ||
+ (i > 0x00000150 && i < 0x000001fc) ||
+ (i > 0x00000278 && i < 0x000002a0) ||
+ (i > 0x000002c0 && i < 0x000002cf) ||
+ (i > 0x000002dc && i < 0x000002f0) ||
+ (i > 0x000003c8 && i < 0x00000400) ||
+ (i > 0x00000400 && i < 0x00000410) ||
+ (i > 0x00000410 && i < 0x00000420) ||
+ (i > 0x00000420 && i < 0x00000430) ||
+ (i > 0x00000430 && i < 0x00000440) ||
+ (i > 0x00000440 && i < 0x00000450) ||
+ (i > 0x00000450 && i < 0x00000500) ||
+ (i > 0x0000054c && i < 0x00000568) ||
+ (i > 0x000005c8 && i < 0x00000600))
+ continue;
+
+ if (other_function)
+ status =
+ ql_read_other_func_xgmac_reg(qdev, i, buf);
+ else
+ status = ql_read_xgmac_reg(qdev, i, buf);
+
+ if (status) {
+ *buf = 0xdeadbeef;
+ break;
+ }
+ }
+ return status;
+}
static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
{
@@ -43,8 +443,8 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
status = ql_get_mac_addr_reg(qdev,
MAC_ADDR_TYPE_CAM_MAC, i, value);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Failed read of mac index register.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of mac index register.\n");
goto err;
}
*buf++ = value[0]; /* lower MAC address */
@@ -55,8 +455,8 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
status = ql_get_mac_addr_reg(qdev,
MAC_ADDR_TYPE_MULTI_MAC, i, value);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Failed read of mac index register.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of mac index register.\n");
goto err;
}
*buf++ = value[0]; /* lower Mcast address */
@@ -79,8 +479,8 @@ static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf)
for (i = 0; i < 16; i++) {
status = ql_get_routing_reg(qdev, i, &value);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Failed read of routing index register.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of routing index register.\n");
goto err;
} else {
*buf++ = value;
@@ -91,6 +491,226 @@ err:
return status;
}
+/* Read the MPI Processor shadow registers */
+static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 * buf)
+{
+ u32 i;
+ int status;
+
+ for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
+ status = ql_write_mpi_reg(qdev, RISC_124,
+ (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
+ if (status)
+ goto end;
+ status = ql_read_mpi_reg(qdev, RISC_127, buf);
+ if (status)
+ goto end;
+ }
+end:
+ return status;
+}
+
+/* Read the MPI Processor core registers */
+static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf,
+ u32 offset, u32 count)
+{
+ int i, status = 0;
+ for (i = 0; i < count; i++, buf++) {
+ status = ql_read_mpi_reg(qdev, offset + i, buf);
+ if (status)
+ return status;
+ }
+ return status;
+}
+
+/* Read the ASIC probe dump */
+static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
+ u32 valid, u32 *buf)
+{
+ u32 module, mux_sel, probe, lo_val, hi_val;
+
+ for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
+ if (!((valid >> module) & 1))
+ continue;
+ for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
+ probe = clock
+ | PRB_MX_ADDR_ARE
+ | mux_sel
+ | (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
+ ql_write32(qdev, PRB_MX_ADDR, probe);
+ lo_val = ql_read32(qdev, PRB_MX_DATA);
+ if (mux_sel == 0) {
+ *buf = probe;
+ buf++;
+ }
+ probe |= PRB_MX_ADDR_UP;
+ ql_write32(qdev, PRB_MX_ADDR, probe);
+ hi_val = ql_read32(qdev, PRB_MX_DATA);
+ *buf = lo_val;
+ buf++;
+ *buf = hi_val;
+ buf++;
+ }
+ }
+ return buf;
+}
+
+static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
+{
+ /* First we have to enable the probe mux */
+ ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
+ PRB_MX_ADDR_VALID_SYS_MOD, buf);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
+ PRB_MX_ADDR_VALID_PCI_MOD, buf);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
+ PRB_MX_ADDR_VALID_XGM_MOD, buf);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
+ PRB_MX_ADDR_VALID_FC_MOD, buf);
+ return 0;
+
+}
+
+/* Read out the routing index registers */
+static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
+{
+ int status;
+ u32 type, index, index_max;
+ u32 result_index;
+ u32 result_data;
+ u32 val;
+
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (status)
+ return status;
+
+ for (type = 0; type < 4; type++) {
+ if (type < 2)
+ index_max = 8;
+ else
+ index_max = 16;
+ for (index = 0; index < index_max; index++) {
+ val = RT_IDX_RS
+ | (type << RT_IDX_TYPE_SHIFT)
+ | (index << RT_IDX_IDX_SHIFT);
+ ql_write32(qdev, RT_IDX, val);
+ result_index = 0;
+ while ((result_index & RT_IDX_MR) == 0)
+ result_index = ql_read32(qdev, RT_IDX);
+ result_data = ql_read32(qdev, RT_DATA);
+ *buf = type;
+ buf++;
+ *buf = index;
+ buf++;
+ *buf = result_index;
+ buf++;
+ *buf = result_data;
+ buf++;
+ }
+ }
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ return status;
+}
+
+/* Read out the MAC protocol registers */
+static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
+{
+ u32 result_index, result_data;
+ u32 type;
+ u32 index;
+ u32 offset;
+ u32 val;
+ u32 initial_val = MAC_ADDR_RS;
+ u32 max_index;
+ u32 max_offset;
+
+ for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
+ switch (type) {
+
+ case 0: /* CAM */
+ initial_val |= MAC_ADDR_ADR;
+ max_index = MAC_ADDR_MAX_CAM_ENTRIES;
+ max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+ break;
+ case 1: /* Multicast MAC Address */
+ max_index = MAC_ADDR_MAX_CAM_WCOUNT;
+ max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+ break;
+ case 2: /* VLAN filter mask */
+ case 3: /* MC filter mask */
+ max_index = MAC_ADDR_MAX_CAM_WCOUNT;
+ max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+ break;
+ case 4: /* FC MAC addresses */
+ max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
+ max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
+ break;
+ case 5: /* Mgmt MAC addresses */
+ max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
+ break;
+ case 6: /* Mgmt VLAN addresses */
+ max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
+ break;
+ case 7: /* Mgmt IPv4 address */
+ max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
+ break;
+ case 8: /* Mgmt IPv6 address */
+ max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
+ break;
+ case 9: /* Mgmt TCP/UDP Dest port */
+ max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
+ break;
+ default:
+ printk(KERN_ERR "Bad type!!! 0x%08x\n", type);
+ max_index = 0;
+ max_offset = 0;
+ break;
+ }
+ for (index = 0; index < max_index; index++) {
+ for (offset = 0; offset < max_offset; offset++) {
+ val = initial_val
+ | (type << MAC_ADDR_TYPE_SHIFT)
+ | (index << MAC_ADDR_IDX_SHIFT)
+ | (offset);
+ ql_write32(qdev, MAC_ADDR_IDX, val);
+ result_index = 0;
+ while ((result_index & MAC_ADDR_MR) == 0) {
+ result_index = ql_read32(qdev,
+ MAC_ADDR_IDX);
+ }
+ result_data = ql_read32(qdev, MAC_ADDR_DATA);
+ *buf = result_index;
+ buf++;
+ *buf = result_data;
+ buf++;
+ }
+ }
+ }
+}
+
+static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
+{
+ u32 func_num, reg, reg_val;
+ int status;
+
+ for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
+ reg = MPI_NIC_REG_BLOCK
+ | (func_num << MPI_NIC_FUNCTION_SHIFT)
+ | (SEM / 4);
+ status = ql_read_mpi_reg(qdev, reg, &reg_val);
+ *buf = reg_val;
+ /* if the read failed then dead fill the element. */
+ if (status)
+ *buf = 0xdeadbeef;
+ buf++;
+ }
+}
+
/* Create a coredump segment header */
static void ql_build_coredump_seg_header(
struct mpi_coredump_segment_header *seg_hdr,
@@ -103,6 +723,526 @@ static void ql_build_coredump_seg_header(
memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
}
+/*
+ * This function should be called when a coredump / probedump
+ * is to be extracted from the HBA. It is assumed there is a
+ * qdev structure that contains the base address of the register
+ * space for this function as well as a coredump structure that
+ * will contain the dump.
+ */
+int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
+{
+ int status;
+ int i;
+
+ if (!mpi_coredump) {
+ netif_err(qdev, drv, qdev->ndev, "No memory available.\n");
+ return -ENOMEM;
+ }
+
+ /* Try to get the spinlock, but don't worry if
+ * it isn't available. If the firmware died it
+ * might be holding the sem.
+ */
+ ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+
+ status = ql_pause_mpi_risc(qdev);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed RISC pause. Status = 0x%.08x\n", status);
+ goto err;
+ }
+
+ /* Insert the global header */
+ memset(&(mpi_coredump->mpi_global_header), 0,
+ sizeof(struct mpi_coredump_global_header));
+ mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
+ mpi_coredump->mpi_global_header.headerSize =
+ sizeof(struct mpi_coredump_global_header);
+ mpi_coredump->mpi_global_header.imageSize =
+ sizeof(struct ql_mpi_coredump);
+ memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ sizeof(mpi_coredump->mpi_global_header.idString));
+
+ /* Get generic NIC reg dump */
+ ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
+ NIC1_CONTROL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
+ NIC2_CONTROL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
+
+ /* Get XGMac registers. (Segment 18, Rev C. step 21) */
+ ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
+ NIC1_XGMAC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
+ NIC2_XGMAC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
+
+ if (qdev->func & 1) {
+ /* Odd means our function is NIC 2 */
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic2_regs[i] =
+ ql_read32(qdev, i * sizeof(u32));
+
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic_regs[i] =
+ ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
+
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
+ } else {
+ /* Even means our function is NIC 1 */
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic_regs[i] =
+ ql_read32(qdev, i * sizeof(u32));
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic2_regs[i] =
+ ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
+
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
+ }
+
+ /* Rev C. Step 20a */
+ ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
+ XAUI_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xaui_an),
+ "XAUI AN Registers");
+
+ /* Rev C. Step 20b */
+ ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
+ XAUI_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xaui_hss_pcs),
+ "XAUI HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_an),
+ "XFI AN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
+ XFI_TRAIN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_train),
+ "XFI TRAIN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
+ XFI_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_pcs),
+ "XFI HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
+ XFI_HSS_TX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_tx),
+ "XFI HSS TX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
+ XFI_HSS_RX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_rx),
+ "XFI HSS RX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
+ XFI_HSS_PLL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_pll),
+ "XFI HSS PLL Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
+ XAUI2_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xaui_an),
+ "XAUI2 AN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
+ XAUI2_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
+ "XAUI2 HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
+ XFI2_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_an),
+ "XFI2 AN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
+ XFI2_TRAIN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_train),
+ "XFI2 TRAIN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
+ XFI2_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
+ "XFI2 HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
+ XFI2_HSS_TX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_tx),
+ "XFI2 HSS TX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
+ XFI2_HSS_RX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_rx),
+ "XFI2 HSS RX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
+ XFI2_HSS_PLL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_pll),
+ "XFI2 HSS PLL Registers");
+
+ status = ql_get_serdes_regs(qdev, mpi_coredump);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
+ status);
+ goto err;
+ }
+
+ ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
+ CORE_SEG_NUM,
+ sizeof(mpi_coredump->core_regs_seg_hdr) +
+ sizeof(mpi_coredump->mpi_core_regs) +
+ sizeof(mpi_coredump->mpi_core_sh_regs),
+ "Core Registers");
+
+ /* Get the MPI Core Registers */
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
+ MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
+ if (status)
+ goto err;
+ /* Get the 16 MPI shadow registers */
+ status = ql_get_mpi_shadow_regs(qdev,
+ &mpi_coredump->mpi_core_sh_regs[0]);
+ if (status)
+ goto err;
+
+ /* Get the Test Logic Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
+ TEST_LOGIC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->test_logic_regs),
+ "Test Logic Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
+ TEST_REGS_ADDR, TEST_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the RMII Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
+ RMII_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->rmii_regs),
+ "RMII Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
+ RMII_REGS_ADDR, RMII_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FCMAC1 Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
+ FCMAC1_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fcmac1_regs),
+ "FCMAC1 Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
+ FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FCMAC2 Registers */
+
+ ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
+ FCMAC2_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fcmac2_regs),
+ "FCMAC2 Registers");
+
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
+ FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FC1 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
+ FC1_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fc1_mbx_regs),
+ "FC1 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
+ FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the IDE Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
+ IDE_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->ide_regs),
+ "IDE Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
+ IDE_REGS_ADDR, IDE_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the NIC1 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
+ NIC1_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic1_mbx_regs),
+ "NIC1 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
+ NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the SMBus Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
+ SMBUS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->smbus_regs),
+ "SMBus Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
+ SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FC2 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
+ FC2_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fc2_mbx_regs),
+ "FC2 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
+ FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the NIC2 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
+ NIC2_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic2_mbx_regs),
+ "NIC2 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
+ NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the I2C Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
+ I2C_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->i2c_regs),
+ "I2C Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
+ I2C_REGS_ADDR, I2C_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the MEMC Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
+ MEMC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->memc_regs),
+ "MEMC Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
+ MEMC_REGS_ADDR, MEMC_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the PBus Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
+ PBUS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->pbus_regs),
+ "PBUS Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
+ PBUS_REGS_ADDR, PBUS_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the MDE Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
+ MDE_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->mde_regs),
+ "MDE Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
+ MDE_REGS_ADDR, MDE_REGS_CNT);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
+ MISC_NIC_INFO_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->misc_nic_info),
+ "MISC NIC INFO");
+ mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
+ mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
+ mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
+ mpi_coredump->misc_nic_info.function = qdev->func;
+
+ /* Segment 31 */
+ /* Get indexed register values. */
+ ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
+ INTR_STATES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->intr_states),
+ "INTR States");
+ ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
+ CAM_ENTRIES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->cam_entries),
+ "CAM Entries");
+ status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
+ ROUTING_WORDS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic_routing_words),
+ "Routing Words");
+ status = ql_get_routing_entries(qdev,
+ &mpi_coredump->nic_routing_words[0]);
+ if (status)
+ goto err;
+
+ /* Segment 34 (Rev C. step 23) */
+ ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
+ ETS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->ets),
+ "ETS Registers");
+ status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
+ PROBE_DUMP_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->probe_dump),
+ "Probe Dump");
+ ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
+ ROUTING_INDEX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->routing_regs),
+ "Routing Regs");
+ status = ql_get_routing_index_registers(qdev,
+ &mpi_coredump->routing_regs[0]);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
+ MAC_PROTOCOL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->mac_prot_regs),
+ "MAC Prot Regs");
+ ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
+
+ /* Get the semaphore registers for all 5 functions */
+ ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
+ SEM_REGS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->sem_regs), "Sem Registers");
+
+ ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
+
+ /* Prevent the MPI from restarting while we dump the memory. */
+ ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
+
+ /* clear the pause */
+ status = ql_unpause_mpi_risc(qdev);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed RISC unpause. Status = 0x%.08x\n", status);
+ goto err;
+ }
+
+ /* Reset the RISC so we can dump RAM */
+ status = ql_hard_reset_mpi_risc(qdev);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed RISC reset. Status = 0x%.08x\n", status);
+ goto err;
+ }
+
+ ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
+ WCS_RAM_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->code_ram),
+ "WCS RAM");
+ status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
+ CODE_RAM_ADDR, CODE_RAM_CNT);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Dump of CODE RAM. Status = 0x%.08x\n",
+ status);
+ goto err;
+ }
+
+ /* Insert the segment header */
+ ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
+ MEMC_RAM_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->memc_ram),
+ "MEMC RAM");
+ status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
+ MEMC_RAM_ADDR, MEMC_RAM_CNT);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
+ status);
+ goto err;
+ }
+err:
+ ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
+ return status;
+}
+
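ql_core_dump() above only fills a caller-supplied buffer; it pauses the RISC, walks the register segments, then unpauses/resets and unlocks on the way out. A minimal caller sketch follows, assuming the buffer is taken from vmalloc() because struct ql_mpi_coredump is large; example_capture_core() is a hypothetical name, and qdev->mpi_coredump is the field later consumed by ql_mpi_core_to_log().

	/* Sketch only, not part of this patch. */
	static int example_capture_core(struct ql_adapter *qdev)
	{
		struct ql_mpi_coredump *dump;
		int status;

		dump = vmalloc(sizeof(*dump));	/* too large for kmalloc */
		if (!dump)
			return -ENOMEM;

		status = ql_core_dump(qdev, dump);
		if (status) {
			vfree(dump);
			return status;
		}
		qdev->mpi_coredump = dump;	/* picked up by ql_mpi_core_to_log() */
		return 0;
	}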
+static void ql_get_core_dump(struct ql_adapter *qdev)
+{
+ if (!ql_own_firmware(qdev)) {
+ netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
+ return;
+ }
+
+ if (!netif_running(qdev->ndev)) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Force Coredump can only be done from interface that is up.\n");
+ return;
+ }
+
+ if (ql_mb_sys_err(qdev)) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Fail force coredump with ql_mb_sys_err().\n");
+ return;
+ }
+}
+
void ql_gen_reg_dump(struct ql_adapter *qdev,
struct ql_reg_dump *mpi_coredump)
{
@@ -178,6 +1318,37 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
if (status)
return;
+
+ if (test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ ql_get_core_dump(qdev);
+}
+
+/* Coredump to messages log file using separate worker thread */
+void ql_mpi_core_to_log(struct work_struct *work)
+{
+ struct ql_adapter *qdev =
+ container_of(work, struct ql_adapter, mpi_core_to_log.work);
+ u32 *tmp, count;
+ int i;
+
+ count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
+ tmp = (u32 *)qdev->mpi_coredump;
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "Core is dumping to log file!\n");
+
+ for (i = 0; i < count; i += 8) {
+ printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x "
+ "%.08x %.08x %.08x \n", i,
+ tmp[i + 0],
+ tmp[i + 1],
+ tmp[i + 2],
+ tmp[i + 3],
+ tmp[i + 4],
+ tmp[i + 5],
+ tmp[i + 6],
+ tmp[i + 7]);
+ msleep(5);
+ }
}
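The loop above emits eight 32-bit words per line and sleeps 5 ms between lines (presumably to pace the console). If that pacing is not needed, the same output could arguably be produced with the kernel's generic hex-dump helper; a sketch, assuming qdev->mpi_coredump is non-NULL:

	/* Sketch only, not part of this patch: 32 bytes per row = 8 u32 words. */
	print_hex_dump(KERN_ERR, "qlge coredump: ", DUMP_PREFIX_OFFSET,
		       32, 4, qdev->mpi_coredump,
		       sizeof(struct ql_mpi_coredump), false);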
#ifdef QL_REG_DUMP
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 058fa0a48c6f..4f26afeb0f38 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -67,8 +67,8 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
CFG_LCQ, rx_ring->cq_id);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to load CQICB.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to load CQICB.\n");
goto exit;
}
}
@@ -89,8 +89,8 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
CFG_LCQ, rx_ring->cq_id);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to load CQICB.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to load CQICB.\n");
goto exit;
}
}
@@ -107,8 +107,8 @@ static void ql_update_stats(struct ql_adapter *qdev)
spin_lock(&qdev->stats_lock);
if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
- QPRINTK(qdev, DRV, ERR,
- "Couldn't get xgmac sem.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Couldn't get xgmac sem.\n");
goto quit;
}
/*
@@ -116,8 +116,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
*/
for (i = 0x200; i < 0x280; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
- QPRINTK(qdev, DRV, ERR,
- "Error reading status register 0x%.04x.\n", i);
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
goto end;
} else
*iter = data;
@@ -129,8 +130,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
*/
for (i = 0x300; i < 0x3d0; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
- QPRINTK(qdev, DRV, ERR,
- "Error reading status register 0x%.04x.\n", i);
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
goto end;
} else
*iter = data;
@@ -142,8 +144,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
*/
for (i = 0x500; i < 0x540; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
- QPRINTK(qdev, DRV, ERR,
- "Error reading status register 0x%.04x.\n", i);
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
goto end;
} else
*iter = data;
@@ -155,8 +158,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
*/
for (i = 0x568; i < 0x5a8; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
- QPRINTK(qdev, DRV, ERR,
- "Error reading status register 0x%.04x.\n", i);
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
goto end;
} else
*iter = data;
@@ -167,8 +171,8 @@ static void ql_update_stats(struct ql_adapter *qdev)
* Get RX NIC FIFO DROP statistics.
*/
if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
- QPRINTK(qdev, DRV, ERR,
- "Error reading status register 0x%.04x.\n", i);
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n", i);
goto end;
} else
*iter = data;
@@ -396,14 +400,13 @@ static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
return -EINVAL;
qdev->wol = wol->wolopts;
- QPRINTK(qdev, DRV, INFO, "Set wol option 0x%x on %s\n",
- qdev->wol, ndev->name);
+ netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
if (!qdev->wol) {
u32 wol = 0;
status = ql_mb_wol_mode(qdev, wol);
- QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
- (status == 0) ? "cleared sucessfully" : "clear failed",
- wol, qdev->ndev->name);
+ netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n",
+ status == 0 ? "cleared successfully" : "clear failed",
+ wol);
}
return 0;
@@ -534,8 +537,8 @@ static void ql_self_test(struct net_device *ndev,
}
clear_bit(QL_SELFTEST, &qdev->flags);
} else {
- QPRINTK(qdev, DRV, ERR,
- "%s: is down, Loopback test will fail.\n", ndev->name);
+ netif_err(qdev, drv, qdev->ndev,
+ "is down, Loopback test will fail.\n");
eth_test->flags |= ETH_TEST_FL_FAILED;
}
}
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 894a7c84faef..2c052caee884 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -73,7 +73,19 @@ static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, MSIX_IRQ);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
-static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
+static int qlge_mpi_coredump;
+module_param(qlge_mpi_coredump, int, 0);
+MODULE_PARM_DESC(qlge_mpi_coredump,
+ "Option to enable MPI firmware dump. "
+ "Default is OFF - Do Not allocate memory. ");
+
+static int qlge_force_coredump;
+module_param(qlge_force_coredump, int, 0);
+MODULE_PARM_DESC(qlge_force_coredump,
+ "Option to allow force of firmware core dump. "
+ "Default is OFF - Do not allow.");
+
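For illustration only: with the two parameters above, firmware dump support would typically be enabled at load time, e.g. "modprobe qlge qlge_mpi_coredump=1 qlge_force_coredump=1". Both default to 0, so no coredump memory is allocated and forced dumps are refused unless explicitly requested.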
+static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
/* required last entry */
@@ -116,7 +128,7 @@ static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
break;
default:
- QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
+ netif_alert(qdev, probe, qdev->ndev, "Bad Semaphore mask.\n");
return -EINVAL;
}
@@ -156,17 +168,17 @@ int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
/* check for errors */
if (temp & err_bit) {
- QPRINTK(qdev, PROBE, ALERT,
- "register 0x%.08x access error, value = 0x%.08x!.\n",
- reg, temp);
+ netif_alert(qdev, probe, qdev->ndev,
+ "register 0x%.08x access error, value = 0x%.08x!.\n",
+ reg, temp);
return -EIO;
} else if (temp & bit)
return 0;
udelay(UDELAY_DELAY);
count--;
}
- QPRINTK(qdev, PROBE, ALERT,
- "Timed out waiting for reg %x to come ready.\n", reg);
+ netif_alert(qdev, probe, qdev->ndev,
+ "Timed out waiting for reg %x to come ready.\n", reg);
return -ETIMEDOUT;
}
@@ -209,7 +221,7 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
map = pci_map_single(qdev->pdev, ptr, size, direction);
if (pci_dma_mapping_error(qdev->pdev, map)) {
- QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
return -ENOMEM;
}
@@ -219,8 +231,8 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
status = ql_wait_cfg(qdev, bit);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Timed out waiting for CFG to come ready.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Timed out waiting for CFG to come ready.\n");
goto exit;
}
@@ -301,8 +313,8 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
case MAC_ADDR_TYPE_VLAN:
case MAC_ADDR_TYPE_MULTI_FLTR:
default:
- QPRINTK(qdev, IFUP, CRIT,
- "Address type %d not yet supported.\n", type);
+ netif_crit(qdev, ifup, qdev->ndev,
+ "Address type %d not yet supported.\n", type);
status = -EPERM;
}
exit:
@@ -359,12 +371,11 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
(addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
(addr[5]);
- QPRINTK(qdev, IFUP, DEBUG,
- "Adding %s address %pM"
- " at index %d in the CAM.\n",
- ((type ==
- MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
- "UNICAST"), addr, index);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Adding %s address %pM at index %d in the CAM.\n",
+ type == MAC_ADDR_TYPE_MULTI_MAC ?
+ "MULTICAST" : "UNICAST",
+ addr, index);
status =
ql_wait_reg_rdy(qdev,
@@ -414,9 +425,11 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
* addressing. It's either MAC_ADDR_E on or off.
* That's bit-27 we're talking about.
*/
- QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
- (enable_bit ? "Adding" : "Removing"),
- index, (enable_bit ? "to" : "from"));
+ netif_info(qdev, ifup, qdev->ndev,
+ "%s VLAN ID %d %s the CAM.\n",
+ enable_bit ? "Adding" : "Removing",
+ index,
+ enable_bit ? "to" : "from");
status =
ql_wait_reg_rdy(qdev,
@@ -431,8 +444,8 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
}
case MAC_ADDR_TYPE_MULTI_FLTR:
default:
- QPRINTK(qdev, IFUP, CRIT,
- "Address type %d not yet supported.\n", type);
+ netif_crit(qdev, ifup, qdev->ndev,
+ "Address type %d not yet supported.\n", type);
status = -EPERM;
}
exit:
@@ -451,16 +464,13 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
if (set) {
addr = &qdev->ndev->dev_addr[0];
- QPRINTK(qdev, IFUP, DEBUG,
- "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
- addr[0], addr[1], addr[2], addr[3],
- addr[4], addr[5]);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Set Mac addr %pM\n", addr);
} else {
memset(zero_mac_addr, 0, ETH_ALEN);
addr = &zero_mac_addr[0];
- QPRINTK(qdev, IFUP, DEBUG,
- "Clearing MAC address on %s\n",
- qdev->ndev->name);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Clearing MAC address\n");
}
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
@@ -469,23 +479,21 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
- QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
- "address.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init mac address.\n");
return status;
}
void ql_link_on(struct ql_adapter *qdev)
{
- QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
- qdev->ndev->name);
+ netif_err(qdev, link, qdev->ndev, "Link is up.\n");
netif_carrier_on(qdev->ndev);
ql_set_mac_addr(qdev, 1);
}
void ql_link_off(struct ql_adapter *qdev)
{
- QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
- qdev->ndev->name);
+ netif_err(qdev, link, qdev->ndev, "Link is down.\n");
netif_carrier_off(qdev->ndev);
ql_set_mac_addr(qdev, 0);
}
@@ -522,27 +530,27 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
int status = -EINVAL; /* Return error if no mask match. */
u32 value = 0;
- QPRINTK(qdev, IFUP, DEBUG,
- "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
- (enable ? "Adding" : "Removing"),
- ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
- ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
- ((index ==
- RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
- ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
- ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
- ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
- ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
- ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
- ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
- ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
- ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
- ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
- ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
- ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
- ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
- ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
- (enable ? "to" : "from"));
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s %s mask %s the routing reg.\n",
+ enable ? "Adding" : "Removing",
+ index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
+ index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
+ index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
+ index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
+ index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
+ index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
+ index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
+ index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
+ index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
+ index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
+ index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
+ index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
+ index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
+ index == RT_IDX_UNUSED013 ? "UNUSED13" :
+ index == RT_IDX_UNUSED014 ? "UNUSED14" :
+ index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
+ "(Bad index != RT_IDX)",
+ enable ? "to" : "from");
switch (mask) {
case RT_IDX_CAM_HIT:
@@ -602,8 +610,8 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
break;
}
default:
- QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
- mask);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Mask type %d not yet supported.\n", mask);
status = -EPERM;
goto exit;
}
@@ -709,7 +717,7 @@ static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
status = strncmp((char *)&qdev->flash, str, 4);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
return status;
}
@@ -717,8 +725,8 @@ static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
csum += le16_to_cpu(*flash++);
if (csum)
- QPRINTK(qdev, IFUP, ERR,
- "Invalid flash checksum, csum = 0x%.04x.\n", csum);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Invalid flash checksum, csum = 0x%.04x.\n", csum);
return csum;
}
@@ -770,7 +778,8 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
for (i = 0; i < size; i++, p++) {
status = ql_read_flash_word(qdev, i+offset, p);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Error reading flash.\n");
goto exit;
}
}
@@ -779,7 +788,7 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
sizeof(struct flash_params_8000) / sizeof(u16),
"8000");
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
status = -EINVAL;
goto exit;
}
@@ -797,7 +806,7 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
qdev->ndev->addr_len);
if (!is_valid_ether_addr(mac_addr)) {
- QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
status = -EINVAL;
goto exit;
}
@@ -831,7 +840,8 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
for (i = 0; i < size; i++, p++) {
status = ql_read_flash_word(qdev, i+offset, p);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Error reading flash.\n");
goto exit;
}
@@ -841,7 +851,7 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
sizeof(struct flash_params_8012) / sizeof(u16),
"8012");
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
status = -EINVAL;
goto exit;
}
@@ -959,17 +969,17 @@ static int ql_8012_port_initialize(struct ql_adapter *qdev)
/* Another function has the semaphore, so
* wait for the port init bit to come ready.
*/
- QPRINTK(qdev, LINK, INFO,
- "Another function has the semaphore, so wait for the port init bit to come ready.\n");
+ netif_info(qdev, link, qdev->ndev,
+ "Another function has the semaphore, so wait for the port init bit to come ready.\n");
status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
if (status) {
- QPRINTK(qdev, LINK, CRIT,
- "Port initialize timed out.\n");
+ netif_crit(qdev, link, qdev->ndev,
+ "Port initialize timed out.\n");
}
return status;
}
- QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
+ netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
/* Set the core reset. */
status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
if (status)
@@ -1099,8 +1109,8 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
GFP_ATOMIC,
qdev->lbq_buf_order);
if (unlikely(!rx_ring->pg_chunk.page)) {
- QPRINTK(qdev, DRV, ERR,
- "page allocation failed.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "page allocation failed.\n");
return -ENOMEM;
}
rx_ring->pg_chunk.offset = 0;
@@ -1110,8 +1120,8 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
if (pci_dma_mapping_error(qdev->pdev, map)) {
__free_pages(rx_ring->pg_chunk.page,
qdev->lbq_buf_order);
- QPRINTK(qdev, DRV, ERR,
- "PCI mapping failed.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "PCI mapping failed.\n");
return -ENOMEM;
}
rx_ring->pg_chunk.map = map;
@@ -1148,15 +1158,15 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
while (rx_ring->lbq_free_cnt > 32) {
for (i = 0; i < 16; i++) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "lbq: try cleaning clean_idx = %d.\n",
- clean_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "lbq: try cleaning clean_idx = %d.\n",
+ clean_idx);
lbq_desc = &rx_ring->lbq[clean_idx];
if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
- QPRINTK(qdev, IFUP, ERR,
- "Could not get a page chunk.\n");
- return;
- }
+ netif_err(qdev, ifup, qdev->ndev,
+ "Could not get a page chunk.\n");
+ return;
+ }
map = lbq_desc->p.pg_chunk.map +
lbq_desc->p.pg_chunk.offset;
@@ -1181,9 +1191,9 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
}
if (start_idx != clean_idx) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "lbq: updating prod idx = %d.\n",
- rx_ring->lbq_prod_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "lbq: updating prod idx = %d.\n",
+ rx_ring->lbq_prod_idx);
ql_write_db_reg(rx_ring->lbq_prod_idx,
rx_ring->lbq_prod_idx_db_reg);
}
@@ -1201,19 +1211,20 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
while (rx_ring->sbq_free_cnt > 16) {
for (i = 0; i < 16; i++) {
sbq_desc = &rx_ring->sbq[clean_idx];
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "sbq: try cleaning clean_idx = %d.\n",
- clean_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "sbq: try cleaning clean_idx = %d.\n",
+ clean_idx);
if (sbq_desc->p.skb == NULL) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "sbq: getting new skb for index %d.\n",
- sbq_desc->index);
+ netif_printk(qdev, rx_status, KERN_DEBUG,
+ qdev->ndev,
+ "sbq: getting new skb for index %d.\n",
+ sbq_desc->index);
sbq_desc->p.skb =
netdev_alloc_skb(qdev->ndev,
SMALL_BUFFER_SIZE);
if (sbq_desc->p.skb == NULL) {
- QPRINTK(qdev, PROBE, ERR,
- "Couldn't get an skb.\n");
+ netif_err(qdev, probe, qdev->ndev,
+ "Couldn't get an skb.\n");
rx_ring->sbq_clean_idx = clean_idx;
return;
}
@@ -1223,7 +1234,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
rx_ring->sbq_buf_size,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(qdev->pdev, map)) {
- QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "PCI mapping failed.\n");
rx_ring->sbq_clean_idx = clean_idx;
dev_kfree_skb_any(sbq_desc->p.skb);
sbq_desc->p.skb = NULL;
@@ -1247,9 +1259,9 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
}
if (start_idx != clean_idx) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "sbq: updating prod idx = %d.\n",
- rx_ring->sbq_prod_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "sbq: updating prod idx = %d.\n",
+ rx_ring->sbq_prod_idx);
ql_write_db_reg(rx_ring->sbq_prod_idx,
rx_ring->sbq_prod_idx_db_reg);
}
@@ -1281,8 +1293,9 @@ static void ql_unmap_send(struct ql_adapter *qdev,
* then its an OAL.
*/
if (i == 7) {
- QPRINTK(qdev, TX_DONE, DEBUG,
- "unmapping OAL area.\n");
+ netif_printk(qdev, tx_done, KERN_DEBUG,
+ qdev->ndev,
+ "unmapping OAL area.\n");
}
pci_unmap_single(qdev->pdev,
pci_unmap_addr(&tx_ring_desc->map[i],
@@ -1291,8 +1304,8 @@ static void ql_unmap_send(struct ql_adapter *qdev,
maplen),
PCI_DMA_TODEVICE);
} else {
- QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
- i);
+ netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
+ "unmapping frag %d.\n", i);
pci_unmap_page(qdev->pdev,
pci_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
@@ -1317,7 +1330,8 @@ static int ql_map_send(struct ql_adapter *qdev,
int frag_cnt = skb_shinfo(skb)->nr_frags;
if (frag_cnt) {
- QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
+ netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+ "frag_cnt = %d.\n", frag_cnt);
}
/*
* Map the skb buffer first.
@@ -1326,8 +1340,8 @@ static int ql_map_send(struct ql_adapter *qdev,
err = pci_dma_mapping_error(qdev->pdev, map);
if (err) {
- QPRINTK(qdev, TX_QUEUED, ERR,
- "PCI mapping failed with error: %d\n", err);
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "PCI mapping failed with error: %d\n", err);
return NETDEV_TX_BUSY;
}
@@ -1373,9 +1387,9 @@ static int ql_map_send(struct ql_adapter *qdev,
PCI_DMA_TODEVICE);
err = pci_dma_mapping_error(qdev->pdev, map);
if (err) {
- QPRINTK(qdev, TX_QUEUED, ERR,
- "PCI mapping outbound address list with error: %d\n",
- err);
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "PCI mapping outbound address list with error: %d\n",
+ err);
goto map_error;
}
@@ -1403,9 +1417,9 @@ static int ql_map_send(struct ql_adapter *qdev,
err = pci_dma_mapping_error(qdev->pdev, map);
if (err) {
- QPRINTK(qdev, TX_QUEUED, ERR,
- "PCI mapping frags failed with error: %d.\n",
- err);
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "PCI mapping frags failed with error: %d.\n",
+ err);
goto map_error;
}
@@ -1433,6 +1447,260 @@ map_error:
return NETDEV_TX_BUSY;
}
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct sk_buff *skb;
+ struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct skb_frag_struct *rx_frag;
+ int nr_frags;
+ struct napi_struct *napi = &rx_ring->napi;
+
+ napi->dev = qdev->ndev;
+
+ skb = napi_get_frags(napi);
+ if (!skb) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Couldn't get an skb, exiting.\n");
+ rx_ring->rx_dropped++;
+ put_page(lbq_desc->p.pg_chunk.page);
+ return;
+ }
+ prefetch(lbq_desc->p.pg_chunk.va);
+ rx_frag = skb_shinfo(skb)->frags;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ rx_frag += nr_frags;
+ rx_frag->page = lbq_desc->p.pg_chunk.page;
+ rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
+ rx_frag->size = length;
+
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+ skb_shinfo(skb)->nr_frags++;
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += length;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
+ else
+ napi_gro_frags(napi);
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_page(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct net_device *ndev = qdev->ndev;
+ struct sk_buff *skb = NULL;
+ void *addr;
+ struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct napi_struct *napi = &rx_ring->napi;
+
+ skb = netdev_alloc_skb(ndev, length);
+ if (!skb) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Couldn't get an skb, need to unwind!.\n");
+ rx_ring->rx_dropped++;
+ put_page(lbq_desc->p.pg_chunk.page);
+ return;
+ }
+
+ addr = lbq_desc->p.pg_chunk.va;
+ prefetch(addr);
+
+
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
+ rx_ring->rx_errors++;
+ goto err_out;
+ }
+
+ /* The max framesize filter on this chip is set higher than
+ * MTU since FCoE uses 2k frames.
+ */
+ if (length > ndev->mtu + ETH_HLEN) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Frame too long, dropping.\n");
+ rx_ring->rx_dropped++;
+ goto err_out;
+ }
+ memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
+ length);
+ skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset+ETH_HLEN,
+ length-ETH_HLEN);
+ skb->len += length-ETH_HLEN;
+ skb->data_len += length-ETH_HLEN;
+ skb->truesize += length-ETH_HLEN;
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (qdev->rx_csum &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ /* TCP frame. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+ (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+ /* Unfragmented ipv4 UDP frame. */
+ struct iphdr *iph = (struct iphdr *) skb->data;
+ if (!(iph->frag_off &
+ cpu_to_be16(IP_MF|IP_OFFSET))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_printk(qdev, rx_status, KERN_DEBUG,
+ qdev->ndev,
+ "TCP checksum done!\n");
+ }
+ }
+ }
+
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
+ else
+ napi_gro_receive(napi, skb);
+ } else {
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
+ else
+ netif_receive_skb(skb);
+ }
+ return;
+err_out:
+ dev_kfree_skb_any(skb);
+ put_page(lbq_desc->p.pg_chunk.page);
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct net_device *ndev = qdev->ndev;
+ struct sk_buff *skb = NULL;
+ struct sk_buff *new_skb = NULL;
+ struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
+
+ skb = sbq_desc->p.skb;
+ /* Allocate new_skb and copy */
+ new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
+ if (new_skb == NULL) {
+ netif_err(qdev, probe, qdev->ndev,
+ "No skb available, drop the packet.\n");
+ rx_ring->rx_dropped++;
+ return;
+ }
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ memcpy(skb_put(new_skb, length), skb->data, length);
+ skb = new_skb;
+
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
+ dev_kfree_skb_any(skb);
+ rx_ring->rx_errors++;
+ return;
+ }
+
+ /* loopback self test for ethtool */
+ if (test_bit(QL_SELFTEST, &qdev->flags)) {
+ ql_check_lb_frame(qdev, skb);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /* The max framesize filter on this chip is set higher than
+ * MTU since FCoE uses 2k frames.
+ */
+ if (skb->len > ndev->mtu + ETH_HLEN) {
+ dev_kfree_skb_any(skb);
+ rx_ring->rx_dropped++;
+ return;
+ }
+
+ prefetch(skb->data);
+ skb->dev = ndev;
+ if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%s Multicast.\n",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_REG ? "Registered" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+ }
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Promiscuous Packet.\n");
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* If rx checksum is on, and there are no
+ * csum or frame errors.
+ */
+ if (qdev->rx_csum &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ /* TCP frame. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+ (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+ /* Unfragmented ipv4 UDP frame. */
+ struct iphdr *iph = (struct iphdr *) skb->data;
+ if (!(iph->frag_off &
+ cpu_to_be16(IP_MF|IP_OFFSET))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_printk(qdev, rx_status, KERN_DEBUG,
+ qdev->ndev,
+ "TCP checksum done!\n");
+ }
+ }
+ }
+
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
+ vlan_id, skb);
+ else
+ napi_gro_receive(&rx_ring->napi, skb);
+ } else {
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
+ else
+ netif_receive_skb(skb);
+ }
+}
+
static void ql_realign_skb(struct sk_buff *skb, int len)
{
void *temp_addr = skb->data;
@@ -1467,7 +1735,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
*/
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Header of %d bytes in small buffer.\n", hdr_len);
/*
* Headers fit nicely into a small buffer.
*/
@@ -1486,15 +1755,16 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* Handle the data buffer(s).
*/
if (unlikely(!length)) { /* Is there data too? */
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "No Data buffer in this packet.\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "No Data buffer in this packet.\n");
return skb;
}
if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Headers in small, data of %d bytes in small, combine them.\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Headers in small, data of %d bytes in small, combine them.\n",
+ length);
/*
* Data is less than small buffer size so it's
* stuffed in a small buffer.
@@ -1520,8 +1790,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
maplen),
PCI_DMA_FROMDEVICE);
} else {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "%d bytes in a single small buffer.\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes in a single small buffer.\n",
+ length);
sbq_desc = ql_get_curr_sbuf(rx_ring);
skb = sbq_desc->p.skb;
ql_realign_skb(skb, length);
@@ -1536,18 +1807,18 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
}
} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Header in small, %d bytes in large. Chain large to small!\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Header in small, %d bytes in large. Chain large to small!\n",
+ length);
/*
* The data is in a single large buffer. We
* chain it to the header buffer's skb and let
* it rip.
*/
lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Chaining page at offset = %d,"
- "for %d bytes to skb.\n",
- lbq_desc->p.pg_chunk.offset, length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Chaining page at offset = %d, for %d bytes to skb.\n",
+ lbq_desc->p.pg_chunk.offset, length);
skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
lbq_desc->p.pg_chunk.offset,
length);
@@ -1563,8 +1834,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
skb = netdev_alloc_skb(qdev->ndev, length);
if (skb == NULL) {
- QPRINTK(qdev, PROBE, DEBUG,
- "No skb available, drop the packet.\n");
+ netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
+ "No skb available, drop the packet.\n");
return NULL;
}
pci_unmap_page(qdev->pdev,
@@ -1573,8 +1844,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
pci_unmap_len(lbq_desc, maplen),
PCI_DMA_FROMDEVICE);
skb_reserve(skb, NET_IP_ALIGN);
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
+ length);
skb_fill_page_desc(skb, 0,
lbq_desc->p.pg_chunk.page,
lbq_desc->p.pg_chunk.offset,
@@ -1615,8 +1887,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* a local buffer and use it to find the
* pages to chain.
*/
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "%d bytes of headers & data in chain of large.\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes of headers & data in chain of large.\n",
+ length);
skb = sbq_desc->p.skb;
sbq_desc->p.skb = NULL;
skb_reserve(skb, NET_IP_ALIGN);
@@ -1626,9 +1899,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
size = (length < rx_ring->lbq_buf_size) ? length :
rx_ring->lbq_buf_size;
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Adding page %d to skb for %d bytes.\n",
- i, size);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Adding page %d to skb for %d bytes.\n",
+ i, size);
skb_fill_page_desc(skb, i,
lbq_desc->p.pg_chunk.page,
lbq_desc->p.pg_chunk.offset,
@@ -1646,29 +1919,28 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
}
/* Process an inbound completion from an rx ring. */
-static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
+static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp)
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u16 vlan_id)
{
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
- u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
- IB_MAC_IOCB_RSP_VLAN_MASK)
QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
if (unlikely(!skb)) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "No skb available, drop packet.\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "No skb available, drop packet.\n");
rx_ring->rx_dropped++;
return;
}
/* Frame error, so drop the packet. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
- ib_mac_rsp->flags2);
+ netif_err(qdev, drv, qdev->ndev,
+ "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
dev_kfree_skb_any(skb);
rx_ring->rx_errors++;
return;
@@ -1693,17 +1965,18 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
prefetch(skb->data);
skb->dev = ndev;
if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
- QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_REG ? "Registered" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
rx_ring->rx_multicast++;
}
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
- QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Promiscuous Packet.\n");
}
skb->protocol = eth_type_trans(skb, ndev);
@@ -1716,8 +1989,8 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
/* TCP frame. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "TCP checksum done!\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
@@ -1726,8 +1999,8 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
if (!(iph->frag_off &
cpu_to_be16(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "TCP checksum done!\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
}
}
}
@@ -1753,6 +2026,66 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
}
}
+/* Process an inbound completion from an rx ring. */
+static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+ u32 length = le32_to_cpu(ib_mac_rsp->data_len);
+ u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+ ((le16_to_cpu(ib_mac_rsp->vlan_id) &
+ IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
+
+ QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+
+ if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
+ /* The data and headers are split into
+ * separate buffers.
+ */
+ ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
+ vlan_id);
+ } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
+ /* The data fit in a single small buffer.
+ * Allocate a new skb, copy the data and
+ * return the buffer to the free pool.
+ */
+ ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
+ /* TCP packet in a page chunk that's been checksummed.
+ * Tack it on to our GRO skb and let it go.
+ */
+ ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
+ /* Non-TCP packet in a page chunk. Allocate an
+ * skb, tack it on frags, and send it up.
+ */
+ ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else {
+ struct bq_desc *lbq_desc;
+
+ /* Free small buffer that holds the IAL */
+ lbq_desc = ql_get_curr_sbuf(rx_ring);
+ netif_err(qdev, rx_err, qdev->ndev,
+ "Dropping frame, len %d > mtu %d\n",
+ length, qdev->ndev->mtu);
+
+ /* Unwind the large buffers for this frame. */
+ while (length > 0) {
+ lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ length -= (length < rx_ring->lbq_buf_size) ?
+ length : rx_ring->lbq_buf_size;
+ put_page(lbq_desc->p.pg_chunk.page);
+ }
+ }
+
+ return (unsigned long)length;
+}
+
/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
struct ob_mac_iocb_rsp *mac_rsp)
@@ -1774,20 +2107,20 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
OB_MAC_IOCB_RSP_L |
OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
- QPRINTK(qdev, TX_DONE, WARNING,
- "Total descriptor length did not match transfer length.\n");
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "Total descriptor length did not match transfer length.\n");
}
if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
- QPRINTK(qdev, TX_DONE, WARNING,
- "Frame too short to be legal, not sent.\n");
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "Frame too short to be valid, not sent.\n");
}
if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
- QPRINTK(qdev, TX_DONE, WARNING,
- "Frame too long, but sent anyway.\n");
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "Frame too long, but sent anyway.\n");
}
if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
- QPRINTK(qdev, TX_DONE, WARNING,
- "PCI backplane error. Frame not sent.\n");
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "PCI backplane error. Frame not sent.\n");
}
}
atomic_inc(&tx_ring->tx_count);
@@ -1817,33 +2150,35 @@ static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
{
switch (ib_ae_rsp->event) {
case MGMT_ERR_EVENT:
- QPRINTK(qdev, RX_ERR, ERR,
- "Management Processor Fatal Error.\n");
+ netif_err(qdev, rx_err, qdev->ndev,
+ "Management Processor Fatal Error.\n");
ql_queue_fw_error(qdev);
return;
case CAM_LOOKUP_ERR_EVENT:
- QPRINTK(qdev, LINK, ERR,
- "Multiple CAM hits lookup occurred.\n");
- QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
+ netif_err(qdev, link, qdev->ndev,
+ "Multiple CAM hits lookup occurred.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "This event shouldn't occur.\n");
ql_queue_asic_error(qdev);
return;
case SOFT_ECC_ERROR_EVENT:
- QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
+ netif_err(qdev, rx_err, qdev->ndev,
+ "Soft ECC error detected.\n");
ql_queue_asic_error(qdev);
break;
case PCI_ERR_ANON_BUF_RD:
- QPRINTK(qdev, RX_ERR, ERR,
- "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
- ib_ae_rsp->q_id);
+ netif_err(qdev, rx_err, qdev->ndev,
+ "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
+ ib_ae_rsp->q_id);
ql_queue_asic_error(qdev);
break;
default:
- QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
- ib_ae_rsp->event);
+ netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
+ ib_ae_rsp->event);
ql_queue_asic_error(qdev);
break;
}
@@ -1860,9 +2195,9 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
/* While there are entries in the completion queue. */
while (prod != rx_ring->cnsmr_idx) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
- prod, rx_ring->cnsmr_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "cq_id = %d, prod = %d, cnsmr = %d.\n.",
+ rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
rmb();
@@ -1873,9 +2208,9 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
ql_process_mac_tx_intr(qdev, net_rsp);
break;
default:
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Hit default case, not handled! dropping the packet, opcode = %x.\n",
- net_rsp->opcode);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Hit default case, not handled! dropping the packet, opcode = %x.\n",
+ net_rsp->opcode);
}
count++;
ql_update_cq(rx_ring);
@@ -1907,9 +2242,9 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
/* While there are entries in the completion queue. */
while (prod != rx_ring->cnsmr_idx) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
- prod, rx_ring->cnsmr_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "cq_id = %d, prod = %d, cnsmr = %d.\n.",
+ rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
net_rsp = rx_ring->curr_entry;
rmb();
@@ -1925,11 +2260,10 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
net_rsp);
break;
default:
- {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Hit default case, not handled! dropping the packet, opcode = %x.\n",
- net_rsp->opcode);
- }
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Hit default case, not handled! dropping the packet, opcode = %x.\n",
+ net_rsp->opcode);
+ break;
}
count++;
ql_update_cq(rx_ring);
@@ -1950,8 +2284,8 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
int i, work_done = 0;
struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
- QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
- rx_ring->cq_id);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
/* Service the TX rings first. They start
* right after the RSS rings. */
@@ -1963,9 +2297,9 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
(ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
trx_ring->cnsmr_idx)) {
- QPRINTK(qdev, INTR, DEBUG,
- "%s: Servicing TX completion ring %d.\n",
- __func__, trx_ring->cq_id);
+ netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+ "%s: Servicing TX completion ring %d.\n",
+ __func__, trx_ring->cq_id);
ql_clean_outbound_rx_ring(trx_ring);
}
}
@@ -1975,9 +2309,9 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
*/
if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
rx_ring->cnsmr_idx) {
- QPRINTK(qdev, INTR, DEBUG,
- "%s: Servicing RX completion ring %d.\n",
- __func__, rx_ring->cq_id);
+ netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+ "%s: Servicing RX completion ring %d.\n",
+ __func__, rx_ring->cq_id);
work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
}
@@ -1994,12 +2328,13 @@ static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *gr
qdev->vlgrp = grp;
if (grp) {
- QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Turning on VLAN in NIC_RCV_CFG.\n");
ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
NIC_RCV_CFG_VLAN_MATCH_AND_NON);
} else {
- QPRINTK(qdev, IFUP, DEBUG,
- "Turning off VLAN in NIC_RCV_CFG.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Turning off VLAN in NIC_RCV_CFG.\n");
ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
}
}
@@ -2015,7 +2350,8 @@ static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
return;
if (ql_set_mac_addr_reg
(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
- QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init vlan address.\n");
}
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
@@ -2032,7 +2368,8 @@ static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
if (ql_set_mac_addr_reg
(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
- QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to clear vlan address.\n");
}
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
@@ -2061,7 +2398,8 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
spin_lock(&qdev->hw_lock);
if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
- QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
+ netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+ "Shared Interrupt, Not ours!\n");
spin_unlock(&qdev->hw_lock);
return IRQ_NONE;
}
@@ -2074,10 +2412,11 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
*/
if (var & STS_FE) {
ql_queue_asic_error(qdev);
- QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
+ netif_err(qdev, intr, qdev->ndev,
+ "Got fatal error, STS = %x.\n", var);
var = ql_read32(qdev, ERR_STS);
- QPRINTK(qdev, INTR, ERR,
- "Resetting chip. Error Status Register = 0x%x\n", var);
+ netif_err(qdev, intr, qdev->ndev,
+ "Resetting chip. Error Status Register = 0x%x\n", var);
return IRQ_HANDLED;
}
@@ -2090,7 +2429,8 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
* We've got an async event or mailbox completion.
* Handle it and clear the source of the interrupt.
*/
- QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
+ netif_err(qdev, intr, qdev->ndev,
+ "Got MPI processor interrupt.\n");
ql_disable_completion_interrupt(qdev, intr_context->intr);
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
queue_delayed_work_on(smp_processor_id(),
@@ -2105,8 +2445,8 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
*/
var = ql_read32(qdev, ISR1);
if (var & intr_context->irq_mask) {
- QPRINTK(qdev, INTR, INFO,
- "Waking handler for rx_ring[0].\n");
+ netif_info(qdev, intr, qdev->ndev,
+ "Waking handler for rx_ring[0].\n");
ql_disable_completion_interrupt(qdev, intr_context->intr);
napi_schedule(&rx_ring->napi);
work_done++;
@@ -2203,9 +2543,9 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
- QPRINTK(qdev, TX_QUEUED, INFO,
- "%s: shutting down tx queue %d du to lack of resources.\n",
- __func__, tx_ring_idx);
+ netif_info(qdev, tx_queued, qdev->ndev,
+ "%s: shutting down tx queue %d du to lack of resources.\n",
+ __func__, tx_ring_idx);
netif_stop_subqueue(ndev, tx_ring->wq_id);
atomic_inc(&tx_ring->queue_stopped);
tx_ring->tx_errors++;
@@ -2226,8 +2566,8 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
- QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
- vlan_tx_tag_get(skb));
+ netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+ "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
}
@@ -2241,8 +2581,8 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
}
if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
NETDEV_TX_OK) {
- QPRINTK(qdev, TX_QUEUED, ERR,
- "Could not map the segments.\n");
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "Could not map the segments.\n");
tx_ring->tx_errors++;
return NETDEV_TX_BUSY;
}
@@ -2253,8 +2593,9 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
wmb();
ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
- QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
- tx_ring->prod_idx, skb->len);
+ netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+ "tx queued, slot %d, len %d\n",
+ tx_ring->prod_idx, skb->len);
atomic_dec(&tx_ring->tx_count);
return NETDEV_TX_OK;
@@ -2285,8 +2626,8 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
pci_alloc_consistent(qdev->pdev,
PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
if (qdev->rx_ring_shadow_reg_area == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Allocation of RX shadow space failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Allocation of RX shadow space failed.\n");
return -ENOMEM;
}
memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
@@ -2294,8 +2635,8 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
&qdev->tx_ring_shadow_reg_dma);
if (qdev->tx_ring_shadow_reg_area == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Allocation of TX shadow space failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Allocation of TX shadow space failed.\n");
goto err_wqp_sh_area;
}
memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
@@ -2349,7 +2690,7 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
if ((tx_ring->wq_base == NULL) ||
tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
- QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
+ netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
return -ENOMEM;
}
tx_ring->q =
@@ -2400,7 +2741,8 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
for (i = 0; i < rx_ring->sbq_len; i++) {
sbq_desc = &rx_ring->sbq[i];
if (sbq_desc == NULL) {
- QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
+ netif_err(qdev, ifup, qdev->ndev,
+ "sbq_desc %d is NULL.\n", i);
return;
}
if (sbq_desc->p.skb) {
@@ -2527,7 +2869,7 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
&rx_ring->cq_base_dma);
if (rx_ring->cq_base == NULL) {
- QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
+ netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
return -ENOMEM;
}
@@ -2540,8 +2882,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
&rx_ring->sbq_base_dma);
if (rx_ring->sbq_base == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Small buffer queue allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Small buffer queue allocation failed.\n");
goto err_mem;
}
@@ -2552,8 +2894,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
GFP_KERNEL);
if (rx_ring->sbq == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Small buffer queue control block allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Small buffer queue control block allocation failed.\n");
goto err_mem;
}
@@ -2569,8 +2911,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
&rx_ring->lbq_base_dma);
if (rx_ring->lbq_base == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Large buffer queue allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Large buffer queue allocation failed.\n");
goto err_mem;
}
/*
@@ -2580,8 +2922,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
GFP_KERNEL);
if (rx_ring->lbq == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Large buffer queue control block allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Large buffer queue control block allocation failed.\n");
goto err_mem;
}
@@ -2610,10 +2952,10 @@ static void ql_tx_ring_clean(struct ql_adapter *qdev)
for (i = 0; i < tx_ring->wq_len; i++) {
tx_ring_desc = &tx_ring->q[i];
if (tx_ring_desc && tx_ring_desc->skb) {
- QPRINTK(qdev, IFDOWN, ERR,
- "Freeing lost SKB %p, from queue %d, index %d.\n",
- tx_ring_desc->skb, j,
- tx_ring_desc->index);
+ netif_err(qdev, ifdown, qdev->ndev,
+ "Freeing lost SKB %p, from queue %d, index %d.\n",
+ tx_ring_desc->skb, j,
+ tx_ring_desc->index);
ql_unmap_send(qdev, tx_ring_desc,
tx_ring_desc->map_cnt);
dev_kfree_skb(tx_ring_desc->skb);
@@ -2644,16 +2986,16 @@ static int ql_alloc_mem_resources(struct ql_adapter *qdev)
for (i = 0; i < qdev->rx_ring_count; i++) {
if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
- QPRINTK(qdev, IFUP, ERR,
- "RX resource allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "RX resource allocation failed.\n");
goto err_mem;
}
}
/* Allocate tx queue resources */
for (i = 0; i < qdev->tx_ring_count; i++) {
if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
- QPRINTK(qdev, IFUP, ERR,
- "TX resource allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "TX resource allocation failed.\n");
goto err_mem;
}
}
@@ -2788,14 +3130,15 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
break;
default:
- QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
- rx_ring->type);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Invalid rx_ring->type = %d.\n", rx_ring->type);
}
- QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Initializing rx work queue.\n");
err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
CFG_LCQ, rx_ring->cq_id);
if (err) {
- QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
return err;
}
return err;
@@ -2841,10 +3184,11 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
(u16) tx_ring->wq_id);
if (err) {
- QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
return err;
}
- QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Successfully loaded WQICB.\n");
return err;
}
@@ -2898,15 +3242,15 @@ static void ql_enable_msix(struct ql_adapter *qdev)
if (err < 0) {
kfree(qdev->msi_x_entry);
qdev->msi_x_entry = NULL;
- QPRINTK(qdev, IFUP, WARNING,
- "MSI-X Enable failed, trying MSI.\n");
+ netif_warn(qdev, ifup, qdev->ndev,
+ "MSI-X Enable failed, trying MSI.\n");
qdev->intr_count = 1;
qlge_irq_type = MSI_IRQ;
} else if (err == 0) {
set_bit(QL_MSIX_ENABLED, &qdev->flags);
- QPRINTK(qdev, IFUP, INFO,
- "MSI-X Enabled, got %d vectors.\n",
- qdev->intr_count);
+ netif_info(qdev, ifup, qdev->ndev,
+ "MSI-X Enabled, got %d vectors.\n",
+ qdev->intr_count);
return;
}
}
@@ -2915,13 +3259,14 @@ msi:
if (qlge_irq_type == MSI_IRQ) {
if (!pci_enable_msi(qdev->pdev)) {
set_bit(QL_MSI_ENABLED, &qdev->flags);
- QPRINTK(qdev, IFUP, INFO,
- "Running with MSI interrupts.\n");
+ netif_info(qdev, ifup, qdev->ndev,
+ "Running with MSI interrupts.\n");
return;
}
}
qlge_irq_type = LEG_IRQ;
- QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Running with legacy interrupts.\n");
}
/* Each vector services 1 RSS ring and and 1 or more
@@ -3093,12 +3438,12 @@ static void ql_free_irq(struct ql_adapter *qdev)
if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
free_irq(qdev->msi_x_entry[i].vector,
&qdev->rx_ring[i]);
- QPRINTK(qdev, IFDOWN, DEBUG,
- "freeing msix interrupt %d.\n", i);
+ netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
+ "freeing msix interrupt %d.\n", i);
} else {
free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
- QPRINTK(qdev, IFDOWN, DEBUG,
- "freeing msi interrupt %d.\n", i);
+ netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
+ "freeing msi interrupt %d.\n", i);
}
}
}
@@ -3123,32 +3468,33 @@ static int ql_request_irq(struct ql_adapter *qdev)
intr_context->name,
&qdev->rx_ring[i]);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed request for MSIX interrupt %d.\n",
- i);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed request for MSIX interrupt %d.\n",
+ i);
goto err_irq;
} else {
- QPRINTK(qdev, IFUP, DEBUG,
- "Hooked intr %d, queue type %s%s%s, with name %s.\n",
- i,
- qdev->rx_ring[i].type ==
- DEFAULT_Q ? "DEFAULT_Q" : "",
- qdev->rx_ring[i].type ==
- TX_Q ? "TX_Q" : "",
- qdev->rx_ring[i].type ==
- RX_Q ? "RX_Q" : "", intr_context->name);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Hooked intr %d, queue type %s, with name %s.\n",
+ i,
+ qdev->rx_ring[i].type == DEFAULT_Q ?
+ "DEFAULT_Q" :
+ qdev->rx_ring[i].type == TX_Q ?
+ "TX_Q" :
+ qdev->rx_ring[i].type == RX_Q ?
+ "RX_Q" : "",
+ intr_context->name);
}
} else {
- QPRINTK(qdev, IFUP, DEBUG,
- "trying msi or legacy interrupts.\n");
- QPRINTK(qdev, IFUP, DEBUG,
- "%s: irq = %d.\n", __func__, pdev->irq);
- QPRINTK(qdev, IFUP, DEBUG,
- "%s: context->name = %s.\n", __func__,
- intr_context->name);
- QPRINTK(qdev, IFUP, DEBUG,
- "%s: dev_id = 0x%p.\n", __func__,
- &qdev->rx_ring[0]);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "trying msi or legacy interrupts.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s: irq = %d.\n", __func__, pdev->irq);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s: context->name = %s.\n", __func__,
+ intr_context->name);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s: dev_id = 0x%p.\n", __func__,
+ &qdev->rx_ring[0]);
status =
request_irq(pdev->irq, qlge_isr,
test_bit(QL_MSI_ENABLED,
@@ -3158,20 +3504,20 @@ static int ql_request_irq(struct ql_adapter *qdev)
if (status)
goto err_irq;
- QPRINTK(qdev, IFUP, ERR,
- "Hooked intr %d, queue type %s%s%s, with name %s.\n",
- i,
- qdev->rx_ring[0].type ==
- DEFAULT_Q ? "DEFAULT_Q" : "",
- qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
- qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
- intr_context->name);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Hooked intr %d, queue type %s, with name %s.\n",
+ i,
+ qdev->rx_ring[0].type == DEFAULT_Q ?
+ "DEFAULT_Q" :
+ qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
+ qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
+ intr_context->name);
}
intr_context->hooked = 1;
}
return status;
err_irq:
- QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!/n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n");
ql_free_irq(qdev);
return status;
}
@@ -3205,14 +3551,15 @@ static int ql_start_rss(struct ql_adapter *qdev)
memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
- QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
return status;
}
- QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Successfully loaded RICB.\n");
return status;
}
@@ -3227,9 +3574,8 @@ static int ql_clear_routing_entries(struct ql_adapter *qdev)
for (i = 0; i < 16; i++) {
status = ql_set_routing_reg(qdev, i, 0, 0);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for CAM "
- "packets.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for CAM packets.\n");
break;
}
}
@@ -3253,14 +3599,14 @@ static int ql_route_initialize(struct ql_adapter *qdev)
status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for error packets.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for error packets.\n");
goto exit;
}
status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for broadcast packets.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for broadcast packets.\n");
goto exit;
}
/* If we have more than one inbound queue, then turn on RSS in the
@@ -3270,8 +3616,8 @@ static int ql_route_initialize(struct ql_adapter *qdev)
status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
RT_IDX_RSS_MATCH, 1);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for MATCH RSS packets.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for MATCH RSS packets.\n");
goto exit;
}
}
@@ -3279,8 +3625,8 @@ static int ql_route_initialize(struct ql_adapter *qdev)
status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
RT_IDX_CAM_HIT, 1);
if (status)
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for CAM packets.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for CAM packets.\n");
exit:
ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
return status;
@@ -3298,13 +3644,13 @@ int ql_cam_route_initialize(struct ql_adapter *qdev)
set &= qdev->port_link_up;
status = ql_set_mac_addr(qdev, set);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
return status;
}
status = ql_route_initialize(qdev);
if (status)
- QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
return status;
}
@@ -3332,15 +3678,15 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
/* Enable the function, set pagesize, enable error checking. */
value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
- FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
+ FSC_EC | FSC_VM_PAGE_4K;
+ value |= SPLT_SETTING;
/* Set/clear header splitting. */
mask = FSC_VM_PAGESIZE_MASK |
FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
ql_write32(qdev, FSC, mask | value);
- ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
- min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
+ ql_write32(qdev, SPLT_HDR, SPLT_LEN);
/* Set RX packet routing to use port/pci function on which the
* packet arrived on in addition to usual frame routing.
@@ -3369,8 +3715,8 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
for (i = 0; i < qdev->rx_ring_count; i++) {
status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to start rx ring[%d].\n", i);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to start rx ring[%d].\n", i);
return status;
}
}
@@ -3381,7 +3727,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
if (qdev->rss_ring_count > 1) {
status = ql_start_rss(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
return status;
}
}
@@ -3390,8 +3736,8 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
for (i = 0; i < qdev->tx_ring_count; i++) {
status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to start tx ring[%d].\n", i);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to start tx ring[%d].\n", i);
return status;
}
}
@@ -3399,20 +3745,20 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
/* Initialize the port and set the max framesize. */
status = qdev->nic_ops->port_initialize(qdev);
if (status)
- QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
/* Set up the MAC address and frame routing filter. */
status = ql_cam_route_initialize(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init CAM/Routing tables.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init CAM/Routing tables.\n");
return status;
}
/* Start NAPI for the RSS queues. */
for (i = 0; i < qdev->rss_ring_count; i++) {
- QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
- i);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Enabling NAPI for rx_ring[%d].\n", i);
napi_enable(&qdev->rx_ring[i].napi);
}
@@ -3429,7 +3775,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
/* Clear all the entries in the routing table. */
status = ql_clear_routing_entries(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
return status;
}
@@ -3452,8 +3798,8 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
} while (time_before(jiffies, end_jiffies));
if (value & RST_FO_FR) {
- QPRINTK(qdev, IFDOWN, ERR,
- "ETIMEDOUT!!! errored out of resetting the chip!\n");
+ netif_err(qdev, ifdown, qdev->ndev,
+ "ETIMEDOUT!!! errored out of resetting the chip!\n");
status = -ETIMEDOUT;
}
@@ -3466,16 +3812,17 @@ static void ql_display_dev_info(struct net_device *ndev)
{
struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
- QPRINTK(qdev, PROBE, INFO,
- "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
- "XG Roll = %d, XG Rev = %d.\n",
- qdev->func,
- qdev->port,
- qdev->chip_rev_id & 0x0000000f,
- qdev->chip_rev_id >> 4 & 0x0000000f,
- qdev->chip_rev_id >> 8 & 0x0000000f,
- qdev->chip_rev_id >> 12 & 0x0000000f);
- QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
+ netif_info(qdev, probe, qdev->ndev,
+ "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
+ "XG Roll = %d, XG Rev = %d.\n",
+ qdev->func,
+ qdev->port,
+ qdev->chip_rev_id & 0x0000000f,
+ qdev->chip_rev_id >> 4 & 0x0000000f,
+ qdev->chip_rev_id >> 8 & 0x0000000f,
+ qdev->chip_rev_id >> 12 & 0x0000000f);
+ netif_info(qdev, probe, qdev->ndev,
+ "MAC address %pM\n", ndev->dev_addr);
}
int ql_wol(struct ql_adapter *qdev)
@@ -3492,23 +3839,23 @@ int ql_wol(struct ql_adapter *qdev)
if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
WAKE_MCAST | WAKE_BCAST)) {
- QPRINTK(qdev, IFDOWN, ERR,
- "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
- qdev->wol);
+ netif_err(qdev, ifdown, qdev->ndev,
+ "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
+ qdev->wol);
return -EINVAL;
}
if (qdev->wol & WAKE_MAGIC) {
status = ql_mb_wol_set_magic(qdev, 1);
if (status) {
- QPRINTK(qdev, IFDOWN, ERR,
- "Failed to set magic packet on %s.\n",
- qdev->ndev->name);
+ netif_err(qdev, ifdown, qdev->ndev,
+ "Failed to set magic packet on %s.\n",
+ qdev->ndev->name);
return status;
} else
- QPRINTK(qdev, DRV, INFO,
- "Enabled magic packet successfully on %s.\n",
- qdev->ndev->name);
+ netif_info(qdev, drv, qdev->ndev,
+ "Enabled magic packet successfully on %s.\n",
+ qdev->ndev->name);
wol |= MB_WOL_MAGIC_PKT;
}
@@ -3516,9 +3863,10 @@ int ql_wol(struct ql_adapter *qdev)
if (qdev->wol) {
wol |= MB_WOL_MODE_ON;
status = ql_mb_wol_mode(qdev, wol);
- QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
- (status == 0) ? "Sucessfully set" : "Failed", wol,
- qdev->ndev->name);
+ netif_err(qdev, drv, qdev->ndev,
+ "WOL %s (wol code 0x%x) on %s\n",
+ (status == 0) ? "Sucessfully set" : "Failed",
+ wol, qdev->ndev->name);
}
return status;
@@ -3538,6 +3886,7 @@ static int ql_adapter_down(struct ql_adapter *qdev)
cancel_delayed_work_sync(&qdev->mpi_reset_work);
cancel_delayed_work_sync(&qdev->mpi_work);
cancel_delayed_work_sync(&qdev->mpi_idc_work);
+ cancel_delayed_work_sync(&qdev->mpi_core_to_log);
cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
for (i = 0; i < qdev->rss_ring_count; i++)
@@ -3558,8 +3907,8 @@ static int ql_adapter_down(struct ql_adapter *qdev)
status = ql_adapter_reset(qdev);
if (status)
- QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
- qdev->func);
+ netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
+ qdev->func);
return status;
}
@@ -3569,7 +3918,7 @@ static int ql_adapter_up(struct ql_adapter *qdev)
err = ql_adapter_initialize(qdev);
if (err) {
- QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
+ netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
goto err_init;
}
set_bit(QL_ADAPTER_UP, &qdev->flags);
@@ -3601,7 +3950,7 @@ static int ql_get_adapter_resources(struct ql_adapter *qdev)
int status = 0;
if (ql_alloc_mem_resources(qdev)) {
- QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
return -ENOMEM;
}
status = ql_request_irq(qdev);
@@ -3612,6 +3961,16 @@ static int qlge_close(struct net_device *ndev)
{
struct ql_adapter *qdev = netdev_priv(ndev);
+ /* If we hit the pci_channel_io_perm_failure
+ * condition, then we already
+ * brought the adapter down.
+ */
+ if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
+ netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
+ clear_bit(QL_EEH_FATAL, &qdev->flags);
+ return 0;
+ }
+
/*
* Wait for device to recover from a reset.
* (Rarely happens, but possible.)
@@ -3681,9 +4040,10 @@ static int ql_configure_rings(struct ql_adapter *qdev)
rx_ring->lbq_size =
rx_ring->lbq_len * sizeof(__le64);
rx_ring->lbq_buf_size = (u16)lbq_buf_len;
- QPRINTK(qdev, IFUP, DEBUG,
- "lbq_buf_size %d, order = %d\n",
- rx_ring->lbq_buf_size, qdev->lbq_buf_order);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "lbq_buf_size %d, order = %d\n",
+ rx_ring->lbq_buf_size,
+ qdev->lbq_buf_order);
rx_ring->sbq_len = NUM_SMALL_BUFFERS;
rx_ring->sbq_size =
rx_ring->sbq_len * sizeof(__le64);
@@ -3747,14 +4107,14 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
int i = 3;
while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
- QPRINTK(qdev, IFUP, ERR,
- "Waiting for adapter UP...\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Waiting for adapter UP...\n");
ssleep(1);
}
if (!i) {
- QPRINTK(qdev, IFUP, ERR,
- "Timed out waiting for adapter UP\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Timed out waiting for adapter UP\n");
return -ETIMEDOUT;
}
}
@@ -3780,8 +4140,8 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
return status;
error:
- QPRINTK(qdev, IFUP, ALERT,
- "Driver up/down cycle failed, closing device.\n");
+ netif_alert(qdev, ifup, qdev->ndev,
+ "Driver up/down cycle failed, closing device.\n");
set_bit(QL_ADAPTER_UP, &qdev->flags);
dev_close(qdev->ndev);
return status;
@@ -3793,28 +4153,25 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
int status;
if (ndev->mtu == 1500 && new_mtu == 9000) {
- QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
} else if (ndev->mtu == 9000 && new_mtu == 1500) {
- QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
- } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
- (ndev->mtu == 9000 && new_mtu == 9000)) {
- return 0;
+ netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
} else
return -EINVAL;
queue_delayed_work(qdev->workqueue,
&qdev->mpi_port_cfg_work, 3*HZ);
+ ndev->mtu = new_mtu;
+
if (!netif_running(qdev->ndev)) {
- ndev->mtu = new_mtu;
return 0;
}
- ndev->mtu = new_mtu;
status = ql_change_rx_buffers(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Changing MTU failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Changing MTU failed.\n");
}
return status;
@@ -3874,8 +4231,8 @@ static void qlge_set_multicast_list(struct net_device *ndev)
if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to set promiscous mode.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to set promiscous mode.\n");
} else {
set_bit(QL_PROMISCUOUS, &qdev->flags);
}
@@ -3884,8 +4241,8 @@ static void qlge_set_multicast_list(struct net_device *ndev)
if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to clear promiscous mode.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to clear promiscous mode.\n");
} else {
clear_bit(QL_PROMISCUOUS, &qdev->flags);
}
@@ -3897,12 +4254,12 @@ static void qlge_set_multicast_list(struct net_device *ndev)
* transition is taking place.
*/
if ((ndev->flags & IFF_ALLMULTI) ||
- (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
+ (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to set all-multi mode.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to set all-multi mode.\n");
} else {
set_bit(QL_ALLMULTI, &qdev->flags);
}
@@ -3911,15 +4268,15 @@ static void qlge_set_multicast_list(struct net_device *ndev)
if (test_bit(QL_ALLMULTI, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to clear all-multi mode.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to clear all-multi mode.\n");
} else {
clear_bit(QL_ALLMULTI, &qdev->flags);
}
}
}
- if (ndev->mc_count) {
+ if (!netdev_mc_empty(ndev)) {
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
goto exit;
@@ -3927,16 +4284,16 @@ static void qlge_set_multicast_list(struct net_device *ndev)
i++, mc_ptr = mc_ptr->next)
if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
MAC_ADDR_TYPE_MULTI_MAC, i)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to loadmulticast address.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to loadmulticast address.\n");
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
goto exit;
}
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
if (ql_set_routing_reg
(qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to set multicast match mode.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to set multicast match mode.\n");
} else {
set_bit(QL_ALLMULTI, &qdev->flags);
}
@@ -3961,7 +4318,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
if (status)
- QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
+ netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
return status;
}
@@ -3994,8 +4351,8 @@ static void ql_asic_reset_work(struct work_struct *work)
rtnl_unlock();
return;
error:
- QPRINTK(qdev, IFUP, ALERT,
- "Driver up/down cycle failed, closing device\n");
+ netif_alert(qdev, ifup, qdev->ndev,
+ "Driver up/down cycle failed, closing device\n");
set_bit(QL_ADAPTER_UP, &qdev->flags);
dev_close(qdev->ndev);
@@ -4094,6 +4451,7 @@ static void ql_release_all(struct pci_dev *pdev)
iounmap(qdev->reg_base);
if (qdev->doorbell_area)
iounmap(qdev->doorbell_area);
+ vfree(qdev->mpi_coredump);
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
}
@@ -4175,6 +4533,17 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
spin_lock_init(&qdev->hw_lock);
spin_lock_init(&qdev->stats_lock);
+ if (qlge_mpi_coredump) {
+ qdev->mpi_coredump =
+ vmalloc(sizeof(struct ql_mpi_coredump));
+ if (qdev->mpi_coredump == NULL) {
+ dev_err(&pdev->dev, "Coredump alloc failed.\n");
+ err = -ENOMEM;
+ goto err_out2;
+ }
+ if (qlge_force_coredump)
+ set_bit(QL_FRC_COREDUMP, &qdev->flags);
+ }
/* make sure the EEPROM is good */
err = qdev->nic_ops->get_flash(qdev);
if (err) {
@@ -4204,6 +4573,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
+ INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
init_completion(&qdev->ide_completion);
if (!cards_found) {
@@ -4234,6 +4604,21 @@ static const struct net_device_ops qlge_netdev_ops = {
.ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
};
+static void ql_timer(unsigned long data)
+{
+ struct ql_adapter *qdev = (struct ql_adapter *)data;
+ u32 var = 0;
+
+ var = ql_read32(qdev, STS);
+ if (pci_channel_offline(qdev->pdev)) {
+ netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
+ return;
+ }
+
+ qdev->timer.expires = jiffies + (5*HZ);
+ add_timer(&qdev->timer);
+}
+
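Editor's note: ql_timer() is a self-rearming watchdog whose only job is to touch a register every five seconds so a dead PCI bus is noticed and EEH can take over. A minimal sketch of the same idea, assuming the pre-timer_setup() API used here (the function below is illustrative, not the driver's):

	static void watchdog_fn(unsigned long data)
	{
		struct ql_adapter *qdev = (struct ql_adapter *)data;

		(void)ql_read32(qdev, STS);	/* MMIO read trips EEH on a dead bus */
		if (pci_channel_offline(qdev->pdev))
			return;			/* stop rearming; recovery takes over */
		mod_timer(&qdev->timer, jiffies + 5 * HZ);
	}

The driver's version updates timer.expires and calls add_timer() directly, which for a non-pending timer amounts to the same thing.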
static int __devinit qlge_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_entry)
{
@@ -4285,6 +4670,14 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
pci_disable_device(pdev);
return err;
}
+ /* Start up the timer to trigger EEH if
+ * the bus goes dead
+ */
+ init_timer_deferrable(&qdev->timer);
+ qdev->timer.data = (unsigned long)qdev;
+ qdev->timer.function = ql_timer;
+ qdev->timer.expires = jiffies + (5*HZ);
+ add_timer(&qdev->timer);
ql_link_off(qdev);
ql_display_dev_info(ndev);
atomic_set(&qdev->lb_count, 0);
@@ -4305,6 +4698,8 @@ int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
static void __devexit qlge_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ del_timer_sync(&qdev->timer);
unregister_netdev(ndev);
ql_release_all(pdev);
pci_disable_device(pdev);
@@ -4327,6 +4722,7 @@ static void ql_eeh_close(struct net_device *ndev)
cancel_delayed_work_sync(&qdev->mpi_reset_work);
cancel_delayed_work_sync(&qdev->mpi_work);
cancel_delayed_work_sync(&qdev->mpi_idc_work);
+ cancel_delayed_work_sync(&qdev->mpi_core_to_log);
cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
for (i = 0; i < qdev->rss_ring_count; i++)
@@ -4346,6 +4742,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
enum pci_channel_state state)
{
struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
switch (state) {
case pci_channel_io_normal:
@@ -4359,6 +4756,8 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
case pci_channel_io_perm_failure:
dev_err(&pdev->dev,
"%s: pci_channel_io_perm_failure.\n", __func__);
+ ql_eeh_close(ndev);
+ set_bit(QL_EEH_FATAL, &qdev->flags);
return PCI_ERS_RESULT_DISCONNECT;
}
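Editor's note: the EEH flow this hunk extends is the standard three-step AER/EEH callback sequence. A hedged sketch of how such callbacks are typically wired together (the driver's real table is defined elsewhere in this file and may differ in detail):

	static struct pci_error_handlers sketch_err_handler = {
		.error_detected	= qlge_io_error_detected,	/* quiesce or disconnect */
		.slot_reset	= qlge_io_slot_reset,		/* re-enable, hard reset */
		.resume		= qlge_io_resume,		/* reopen the netdev */
	};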
@@ -4381,11 +4780,18 @@ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
pci_restore_state(pdev);
if (pci_enable_device(pdev)) {
- QPRINTK(qdev, IFUP, ERR,
- "Cannot re-enable PCI device after reset.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
+
+ if (ql_adapter_reset(qdev)) {
+ netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
+ set_bit(QL_EEH_FATAL, &qdev->flags);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
return PCI_ERS_RESULT_RECOVERED;
}
@@ -4395,19 +4801,19 @@ static void qlge_io_resume(struct pci_dev *pdev)
struct ql_adapter *qdev = netdev_priv(ndev);
int err = 0;
- if (ql_adapter_reset(qdev))
- QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
if (netif_running(ndev)) {
err = qlge_open(ndev);
if (err) {
- QPRINTK(qdev, IFUP, ERR,
- "Device initialization failed after reset.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Device initialization failed after reset.\n");
return;
}
} else {
- QPRINTK(qdev, IFUP, ERR,
- "Device was not running prior to EEH.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Device was not running prior to EEH.\n");
}
+ qdev->timer.expires = jiffies + (5*HZ);
+ add_timer(&qdev->timer);
netif_device_attach(ndev);
}
@@ -4424,6 +4830,7 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
int err;
netif_device_detach(ndev);
+ del_timer_sync(&qdev->timer);
if (netif_running(ndev)) {
err = ql_adapter_down(qdev);
@@ -4454,7 +4861,7 @@ static int qlge_resume(struct pci_dev *pdev)
pci_restore_state(pdev);
err = pci_enable_device(pdev);
if (err) {
- QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
+ netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
return err;
}
pci_set_master(pdev);
@@ -4468,6 +4875,8 @@ static int qlge_resume(struct pci_dev *pdev)
return err;
}
+ qdev->timer.expires = jiffies + (5*HZ);
+ add_timer(&qdev->timer);
netif_device_attach(ndev);
return 0;
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index e2b2286102d4..3c00462a5d22 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -1,5 +1,54 @@
#include "qlge.h"
+int ql_unpause_mpi_risc(struct ql_adapter *qdev)
+{
+ u32 tmp;
+
+ /* Un-pause the RISC */
+ tmp = ql_read32(qdev, CSR);
+ if (!(tmp & CSR_RP))
+ return -EIO;
+
+ ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
+ return 0;
+}
+
+int ql_pause_mpi_risc(struct ql_adapter *qdev)
+{
+ u32 tmp;
+ int count = UDELAY_COUNT;
+
+ /* Pause the RISC */
+ ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
+ do {
+ tmp = ql_read32(qdev, CSR);
+ if (tmp & CSR_RP)
+ break;
+ mdelay(UDELAY_DELAY);
+ count--;
+ } while (count);
+ return (count == 0) ? -ETIMEDOUT : 0;
+}
+
+int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
+{
+ u32 tmp;
+ int count = UDELAY_COUNT;
+
+ /* Reset the RISC */
+ ql_write32(qdev, CSR, CSR_CMD_SET_RST);
+ do {
+ tmp = ql_read32(qdev, CSR);
+ if (tmp & CSR_RR) {
+ ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
+ break;
+ }
+ mdelay(UDELAY_DELAY);
+ count--;
+ } while (count);
+ return (count == 0) ? -ETIMEDOUT : 0;
+}
+
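Editor's note: ql_pause_mpi_risc() and ql_hard_reset_mpi_risc() above share one shape: write a CSR command, then poll the CSR status bit with a bounded delay loop. A hedged generic version (UDELAY_COUNT and UDELAY_DELAY are the driver's own constants; the numbers below are placeholders):

	static int csr_cmd_and_wait(struct ql_adapter *qdev, u32 cmd, u32 done_bit)
	{
		int count = 100;		/* stand-in for UDELAY_COUNT */

		ql_write32(qdev, CSR, cmd);
		do {
			if (ql_read32(qdev, CSR) & done_bit)
				return 0;
			mdelay(10);		/* stand-in for UDELAY_DELAY */
		} while (--count);
		return -ETIMEDOUT;
	}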
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
int status;
@@ -45,6 +94,35 @@ int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
return status;
}
+/* Determine if we are in charge of the firmware. We are
+ * if we are the lower of the 2 NIC PCIe functions, or if
+ * we are the higher function and the lower function
+ * is not enabled.
+ */
+int ql_own_firmware(struct ql_adapter *qdev)
+{
+ u32 temp;
+
+ /* If we are the lower of the 2 NIC functions
+ * on the chip then we are responsible for
+ * core dump and firmware reset after an error.
+ */
+ if (qdev->func < qdev->alt_func)
+ return 1;
+
+ /* If we are the higher of the 2 NIC functions
+ * on the chip and the lower function is not
+ * enabled, then we are responsible for
+ * core dump and firmware reset after an error.
+ */
+ temp = ql_read32(qdev, STS);
+ if (!(temp & (1 << (8 + qdev->alt_func))))
+ return 1;
+
+ return 0;
+
+}
+
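Editor's note: the ownership rule reduces to a short boolean. An illustration only (func, alt_func and the STS enable bits are the driver's; the helper name is hypothetical and the logic simply mirrors ql_own_firmware() above):

	static bool example_owns_firmware(struct ql_adapter *qdev)
	{
		/* The lower-numbered function always owns the firmware; the
		 * higher one owns it only when STS shows its sibling
		 * (bit 8 + alt_func) disabled. */
		return (qdev->func < qdev->alt_func) ||
		       !(ql_read32(qdev, STS) & (1 << (8 + qdev->alt_func)));
	}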
static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
int i, status;
@@ -57,7 +135,7 @@ static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
&mbcp->mbox_out[i]);
if (status) {
- QPRINTK(qdev, DRV, ERR, "Failed mailbox read.\n");
+ netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
break;
}
}
@@ -130,7 +208,7 @@ static int ql_idc_req_aen(struct ql_adapter *qdev)
int status;
struct mbox_params *mbcp = &qdev->idc_mbc;
- QPRINTK(qdev, DRV, ERR, "Enter!\n");
+ netif_err(qdev, drv, qdev->ndev, "Enter!\n");
/* Get the status data and start up a thread to
* handle the request.
*/
@@ -138,8 +216,8 @@ static int ql_idc_req_aen(struct ql_adapter *qdev)
mbcp->out_count = 4;
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Could not read MPI, resetting ASIC!\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Could not read MPI, resetting ASIC!\n");
ql_queue_asic_error(qdev);
} else {
/* Begin polled mode early so
@@ -162,8 +240,8 @@ static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
mbcp->out_count = 4;
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Could not read MPI, resetting RISC!\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Could not read MPI, resetting RISC!\n");
ql_queue_fw_error(qdev);
} else
/* Wake up the sleeping mpi_idc_work thread that is
@@ -181,13 +259,13 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "%s: Could not get mailbox status.\n", __func__);
+ netif_err(qdev, drv, qdev->ndev,
+ "%s: Could not get mailbox status.\n", __func__);
return;
}
qdev->link_status = mbcp->mbox_out[1];
- QPRINTK(qdev, DRV, ERR, "Link Up.\n");
+ netif_err(qdev, drv, qdev->ndev, "Link Up.\n");
/* If we're coming back from an IDC event
* then set up the CAM and frame routing.
@@ -195,8 +273,8 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
status = ql_cam_route_initialize(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init CAM/Routing tables.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init CAM/Routing tables.\n");
return;
} else
clear_bit(QL_CAM_RT_SET, &qdev->flags);
@@ -207,7 +285,7 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
* to our liking.
*/
if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
- QPRINTK(qdev, DRV, ERR, "Queue Port Config Worker!\n");
+ netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n");
set_bit(QL_PORT_CFG, &qdev->flags);
/* Begin polled mode early so
* we don't get another interrupt
@@ -229,7 +307,7 @@ static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status)
- QPRINTK(qdev, DRV, ERR, "Link down AEN broken!\n");
+ netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n");
ql_link_off(qdev);
}
@@ -242,9 +320,9 @@ static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status)
- QPRINTK(qdev, DRV, ERR, "SFP in AEN broken!\n");
+ netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n");
else
- QPRINTK(qdev, DRV, ERR, "SFP insertion detected.\n");
+ netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n");
return status;
}
@@ -257,9 +335,9 @@ static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status)
- QPRINTK(qdev, DRV, ERR, "SFP out AEN broken!\n");
+ netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n");
else
- QPRINTK(qdev, DRV, ERR, "SFP removal detected.\n");
+ netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n");
return status;
}
@@ -272,13 +350,13 @@ static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status)
- QPRINTK(qdev, DRV, ERR, "Lost AEN broken!\n");
+ netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
else {
int i;
- QPRINTK(qdev, DRV, ERR, "Lost AEN detected.\n");
+ netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n");
for (i = 0; i < mbcp->out_count; i++)
- QPRINTK(qdev, DRV, ERR, "mbox_out[%d] = 0x%.08x.\n",
- i, mbcp->mbox_out[i]);
+ netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n",
+ i, mbcp->mbox_out[i]);
}
@@ -293,15 +371,15 @@ static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
- QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n");
+ netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n");
} else {
- QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n",
- mbcp->mbox_out[1]);
+ netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n",
+ mbcp->mbox_out[1]);
qdev->fw_rev_id = mbcp->mbox_out[1];
status = ql_cam_route_initialize(qdev);
if (status)
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init CAM/Routing tables.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init CAM/Routing tables.\n");
}
}
@@ -320,8 +398,8 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
mbcp->out_count = 1;
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Could not read MPI, resetting ASIC!\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Could not read MPI, resetting ASIC!\n");
ql_queue_asic_error(qdev);
goto end;
}
@@ -410,15 +488,14 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
mbcp->mbox_out[0] = MB_CMD_STS_ERR;
return status;
}
- QPRINTK(qdev, DRV, ERR,
- "Firmware initialization failed.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Firmware initialization failed.\n");
status = -EIO;
ql_queue_fw_error(qdev);
break;
case AEN_SYS_ERR:
- QPRINTK(qdev, DRV, ERR,
- "System Error.\n");
+ netif_err(qdev, drv, qdev->ndev, "System Error.\n");
ql_queue_fw_error(qdev);
status = -EIO;
break;
@@ -431,8 +508,8 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
/* Need to support AEN 8110 */
break;
default:
- QPRINTK(qdev, DRV, ERR,
- "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
+ netif_err(qdev, drv, qdev->ndev,
+ "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
/* Clear the MPI firmware status. */
}
end:
@@ -505,8 +582,8 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
goto done;
} while (time_before(jiffies, count));
- QPRINTK(qdev, DRV, ERR,
- "Timed out waiting for mailbox complete.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Timed out waiting for mailbox complete.\n");
status = -ETIMEDOUT;
goto end;
@@ -529,6 +606,22 @@ end:
return status;
}
+int ql_mb_sys_err(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 0;
+
+ mbcp->mbox_in[0] = MB_CMD_MAKE_SYS_ERR;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ return status;
+}
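Editor's note: every mailbox helper in this file follows the same recipe: fill mbox_in[], set in_count/out_count, call ql_mailbox_command(), then inspect mbox_out[0]. A hedged skeleton (the function name is hypothetical and the opcode is a placeholder for any MB_CMD_* value):

	static int example_mb_cmd(struct ql_adapter *qdev)
	{
		struct mbox_params mbc = {};

		mbc.in_count   = 1;			/* words we send */
		mbc.out_count  = 1;			/* words we expect back */
		mbc.mbox_in[0] = MB_CMD_ABOUT_FW;	/* placeholder opcode */

		if (ql_mailbox_command(qdev, &mbc))
			return -EIO;			/* transport-level failure */
		if (mbc.mbox_out[0] != MB_CMD_STS_GOOD)
			return -EIO;			/* firmware rejected the command */
		return 0;
	}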
/* Get MPI firmware version. This will be used for
* driver banner and for ethtool info.
@@ -552,8 +645,8 @@ int ql_mb_about_fw(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed about firmware command\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed about firmware command\n");
status = -EIO;
}
@@ -584,8 +677,8 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed Get Firmware State.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Get Firmware State.\n");
status = -EIO;
}
@@ -594,8 +687,8 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
* happen.
*/
if (mbcp->mbox_out[1] & 1) {
- QPRINTK(qdev, DRV, ERR,
- "Firmware waiting for initialization.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Firmware waiting for initialization.\n");
status = -EIO;
}
@@ -627,8 +720,7 @@ int ql_mb_idc_ack(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed IDC ACK send.\n");
+ netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n");
status = -EIO;
}
return status;
@@ -659,16 +751,72 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) {
- QPRINTK(qdev, DRV, ERR,
- "Port Config sent, wait for IDC.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Port Config sent, wait for IDC.\n");
} else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed Set Port Configuration.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Set Port Configuration.\n");
status = -EIO;
}
return status;
}
+int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
+ u32 size)
+{
+ int status = 0;
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 9;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM;
+ mbcp->mbox_in[1] = LSW(addr);
+ mbcp->mbox_in[2] = MSW(req_dma);
+ mbcp->mbox_in[3] = LSW(req_dma);
+ mbcp->mbox_in[4] = MSW(size);
+ mbcp->mbox_in[5] = LSW(size);
+ mbcp->mbox_in[6] = MSW(MSD(req_dma));
+ mbcp->mbox_in[7] = LSW(MSD(req_dma));
+ mbcp->mbox_in[8] = MSW(addr);
+
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n");
+ status = -EIO;
+ }
+ return status;
+}
+
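Editor's note: the nine mailbox words above carry a 32-bit RISC RAM address and a 64-bit DMA address split into 16-bit halves. A hedged worked example, assuming LSW/MSW take the low/high 16 bits of a 32-bit value and MSD the high 32 bits of a 64-bit one (the addresses are made up):

	u64 req_dma = 0x0000001234abcd00ULL;	/* example DMA address */
	u32 addr    = 0x00020000;		/* example RISC RAM address */

	/* mbox_in[1] = 0x0000 LSW(addr)         mbox_in[8] = 0x0002 MSW(addr)
	 * mbox_in[3] = 0xcd00 LSW(req_dma)      mbox_in[2] = 0x34ab MSW(req_dma)
	 * mbox_in[7] = 0x0012 LSW(MSD(req_dma)) mbox_in[6] = 0x0000 MSW(MSD(req_dma))
	 */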
+/* Issue a mailbox command to dump RISC RAM. */
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
+ u32 ram_addr, int word_count)
+{
+ int status;
+ char *my_buf;
+ dma_addr_t buf_dma;
+
+ my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
+ &buf_dma);
+ if (!my_buf)
+ return -EIO;
+
+ status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
+ if (!status)
+ memcpy(buf, my_buf, word_count * sizeof(u32));
+
+ pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
+ buf_dma);
+ return status;
+}
+
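Editor's note: ql_dump_risc_ram_area() bounces the dump through a coherent DMA buffer so the firmware can DMA the words, then copies them into the caller's buffer (which may be vmalloc'ed and therefore not directly DMA-able). A hypothetical caller, with a made-up address and length:

	static void example_dump(struct ql_adapter *qdev)
	{
		u32 words[256];
		int err;

		/* Made-up address and length, purely for illustration. */
		err = ql_dump_risc_ram_area(qdev, words, 0x20000,
					    ARRAY_SIZE(words));
		if (err)
			netif_err(qdev, drv, qdev->ndev,
				  "RAM dump failed: %d\n", err);
	}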
/* Get link settings and maximum frame size settings
* for the current port.
* Most likely will block.
@@ -691,12 +839,12 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed Get Port Configuration.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Get Port Configuration.\n");
status = -EIO;
} else {
- QPRINTK(qdev, DRV, DEBUG,
- "Passed Get Port Configuration.\n");
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "Passed Get Port Configuration.\n");
qdev->link_config = mbcp->mbox_out[1];
qdev->max_frame_size = mbcp->mbox_out[2];
}
@@ -723,8 +871,7 @@ int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to set WOL mode.\n");
+ netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
status = -EIO;
}
return status;
@@ -766,8 +913,7 @@ int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to set WOL mode.\n");
+ netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
status = -EIO;
}
return status;
@@ -793,8 +939,7 @@ static int ql_idc_wait(struct ql_adapter *qdev)
wait_for_completion_timeout(&qdev->ide_completion,
wait_time);
if (!wait_time) {
- QPRINTK(qdev, DRV, ERR,
- "IDC Timeout.\n");
+ netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
break;
}
/* Now examine the response from the IDC process.
@@ -802,18 +947,17 @@ static int ql_idc_wait(struct ql_adapter *qdev)
* more wait time.
*/
if (mbcp->mbox_out[0] == AEN_IDC_EXT) {
- QPRINTK(qdev, DRV, ERR,
- "IDC Time Extension from function.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "IDC Time Extension from function.\n");
wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f;
} else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) {
- QPRINTK(qdev, DRV, ERR,
- "IDC Success.\n");
+ netif_err(qdev, drv, qdev->ndev, "IDC Success.\n");
status = 0;
break;
} else {
- QPRINTK(qdev, DRV, ERR,
- "IDC: Invalid State 0x%.04x.\n",
- mbcp->mbox_out[0]);
+ netif_err(qdev, drv, qdev->ndev,
+ "IDC: Invalid State 0x%.04x.\n",
+ mbcp->mbox_out[0]);
status = -EIO;
break;
}
@@ -842,8 +986,8 @@ int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to set LED Configuration.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed to set LED Configuration.\n");
status = -EIO;
}
@@ -868,8 +1012,8 @@ int ql_mb_get_led_cfg(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to get LED Configuration.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed to get LED Configuration.\n");
status = -EIO;
} else
qdev->led_config = mbcp->mbox_out[1];
@@ -899,16 +1043,16 @@ int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
return status;
if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
- QPRINTK(qdev, DRV, ERR,
- "Command not supported by firmware.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Command not supported by firmware.\n");
status = -EINVAL;
} else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
/* This indicates that the firmware is
* already in the state we are trying to
* change it to.
*/
- QPRINTK(qdev, DRV, ERR,
- "Command parameters make no change.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Command parameters make no change.\n");
}
return status;
}
@@ -938,12 +1082,12 @@ static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
}
if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
- QPRINTK(qdev, DRV, ERR,
- "Command not supported by firmware.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Command not supported by firmware.\n");
status = -EINVAL;
} else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to get MPI traffic control.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed to get MPI traffic control.\n");
status = -EIO;
}
return status;
@@ -999,8 +1143,8 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
status = ql_mb_get_port_cfg(qdev);
rtnl_unlock();
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Bug: Failed to get port config data.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: Failed to get port config data.\n");
goto err;
}
@@ -1013,8 +1157,8 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
status = ql_set_port_cfg(qdev);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Bug: Failed to set port config data.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: Failed to set port config data.\n");
goto err;
}
end:
@@ -1046,8 +1190,8 @@ void ql_mpi_idc_work(struct work_struct *work)
switch (aen) {
default:
- QPRINTK(qdev, DRV, ERR,
- "Bug: Unhandled IDC action.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: Unhandled IDC action.\n");
break;
case MB_CMD_PORT_RESET:
case MB_CMD_STOP_FW:
@@ -1062,11 +1206,11 @@ void ql_mpi_idc_work(struct work_struct *work)
if (timeout) {
status = ql_mb_idc_ack(qdev);
if (status)
- QPRINTK(qdev, DRV, ERR,
- "Bug: No pending IDC!\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: No pending IDC!\n");
} else {
- QPRINTK(qdev, DRV, DEBUG,
- "IDC ACK not required\n");
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "IDC ACK not required\n");
status = 0; /* success */
}
break;
@@ -1095,11 +1239,11 @@ void ql_mpi_idc_work(struct work_struct *work)
if (timeout) {
status = ql_mb_idc_ack(qdev);
if (status)
- QPRINTK(qdev, DRV, ERR,
- "Bug: No pending IDC!\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: No pending IDC!\n");
} else {
- QPRINTK(qdev, DRV, DEBUG,
- "IDC ACK not required\n");
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "IDC ACK not required\n");
status = 0; /* success */
}
break;
@@ -1143,5 +1287,19 @@ void ql_mpi_reset_work(struct work_struct *work)
cancel_delayed_work_sync(&qdev->mpi_work);
cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
cancel_delayed_work_sync(&qdev->mpi_idc_work);
+ /* If we're not the dominant NIC function,
+ * then there is nothing to do.
+ */
+ if (!ql_own_firmware(qdev)) {
+ netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
+ return;
+ }
+
+ if (!ql_core_dump(qdev, qdev->mpi_coredump)) {
+ netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
+ qdev->core_is_dumped = 1;
+ queue_delayed_work(qdev->workqueue,
+ &qdev->mpi_core_to_log, 5 * HZ);
+ }
ql_soft_reset_mpi_risc(qdev);
}
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index f03e2e4a15a8..b8103425facb 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -958,21 +958,22 @@ static void r6040_multicast_list(struct net_device *dev)
}
/* Too many multicast addresses
* accept all traffic */
- else if ((dev->mc_count > MCAST_MAX) || (dev->flags & IFF_ALLMULTI))
+ else if ((netdev_mc_count(dev) > MCAST_MAX) ||
+ (dev->flags & IFF_ALLMULTI))
reg |= 0x0020;
iowrite16(reg, ioaddr);
spin_unlock_irqrestore(&lp->lock, flags);
/* Build the hash table */
- if (dev->mc_count > MCAST_MAX) {
+ if (netdev_mc_count(dev) > MCAST_MAX) {
u16 hash_table[4];
u32 crc;
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
char *addrs = dmi->dmi_addr;
dmi = dmi->next;
@@ -994,14 +995,14 @@ static void r6040_multicast_list(struct net_device *dev)
iowrite16(hash_table[3], ioaddr + MAR3);
}
/* Multicast Address 1~4 case */
- for (i = 0, dmi; (i < dev->mc_count) && (i < MCAST_MAX); i++) {
+ for (i = 0, dmi; (i < netdev_mc_count(dev)) && (i < MCAST_MAX); i++) {
adrp = (u16 *)dmi->dmi_addr;
iowrite16(adrp[0], ioaddr + MID_1L + 8*i);
iowrite16(adrp[1], ioaddr + MID_1M + 8*i);
iowrite16(adrp[2], ioaddr + MID_1H + 8*i);
dmi = dmi->next;
}
- for (i = dev->mc_count; i < MCAST_MAX; i++) {
+ for (i = netdev_mc_count(dev); i < MCAST_MAX; i++) {
iowrite16(0xffff, ioaddr + MID_0L + 8*i);
iowrite16(0xffff, ioaddr + MID_0M + 8*i);
iowrite16(0xffff, ioaddr + MID_0H + 8*i);
@@ -1222,7 +1223,7 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev)
}
-static struct pci_device_id r6040_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(r6040_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) },
{ 0 }
};
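The r6040 hunks above keep the driver's usual three-way receive-filter decision and only swap dev->mc_count for netdev_mc_count(). A minimal sketch of that decision tree follows; MCAST_MAX and the register bits are illustrative placeholders, not the real r6040 layout:

#include <linux/netdevice.h>

#define MCAST_MAX	4	/* illustrative limit, not the driver's value */
#define RX_PROMISC	0x0001	/* placeholder mode bits, not real registers */
#define RX_ALLMULTI	0x0002

static u16 pick_rx_mode(struct net_device *dev, u16 reg)
{
	if (dev->flags & IFF_PROMISC)
		reg |= RX_PROMISC;			/* accept everything */
	else if ((netdev_mc_count(dev) > MCAST_MAX) ||
		 (dev->flags & IFF_ALLMULTI))
		reg |= RX_ALLMULTI;			/* accept all multicast */
	/* otherwise the hash table / exact-match slots get programmed */
	return reg;
}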
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 60f96c468a24..616ae5aa66aa 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -168,7 +168,7 @@ static void rtl_hw_start_8169(struct net_device *);
static void rtl_hw_start_8168(struct net_device *);
static void rtl_hw_start_8101(struct net_device *);
-static struct pci_device_id rtl8169_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
@@ -744,12 +744,10 @@ static void rtl8169_check_link_status(struct net_device *dev,
spin_lock_irqsave(&tp->lock, flags);
if (tp->link_ok(ioaddr)) {
netif_carrier_on(dev);
- if (netif_msg_ifup(tp))
- printk(KERN_INFO PFX "%s: link up\n", dev->name);
+ netif_info(tp, ifup, dev, "link up\n");
} else {
- if (netif_msg_ifdown(tp))
- printk(KERN_INFO PFX "%s: link down\n", dev->name);
netif_carrier_off(dev);
+ netif_info(tp, ifdown, dev, "link down\n");
}
spin_unlock_irqrestore(&tp->lock, flags);
}
@@ -862,11 +860,8 @@ static int rtl8169_set_speed_tbi(struct net_device *dev,
} else if (autoneg == AUTONEG_ENABLE)
RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
else {
- if (netif_msg_link(tp)) {
- printk(KERN_WARNING "%s: "
- "incorrect speed setting refused in TBI mode\n",
- dev->name);
- }
+ netif_warn(tp, link, dev,
+ "incorrect speed setting refused in TBI mode\n");
ret = -EOPNOTSUPP;
}
@@ -901,9 +896,9 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
(tp->mac_version != RTL_GIGA_MAC_VER_15) &&
(tp->mac_version != RTL_GIGA_MAC_VER_16)) {
giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
- } else if (netif_msg_link(tp)) {
- printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n",
- dev->name);
+ } else {
+ netif_info(tp, link, dev,
+ "PHY does not support 1000Mbps\n");
}
bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
@@ -2705,8 +2700,7 @@ static void rtl8169_phy_timer(unsigned long __opaque)
if (tp->link_ok(ioaddr))
goto out_unlock;
- if (netif_msg_link(tp))
- printk(KERN_WARNING "%s: PHY reset until link up\n", dev->name);
+ netif_warn(tp, link, dev, "PHY reset until link up\n");
tp->phy_reset_enable(ioaddr);
@@ -2776,8 +2770,7 @@ static void rtl8169_phy_reset(struct net_device *dev,
return;
msleep(1);
}
- if (netif_msg_link(tp))
- printk(KERN_ERR "%s: PHY reset failed.\n", dev->name);
+ netif_err(tp, link, dev, "PHY reset failed\n");
}
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
@@ -2811,8 +2804,8 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
*/
rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL);
- if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
- printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
+ if (RTL_R8(PHYstatus) & TBI_Enable)
+ netif_info(tp, link, dev, "TBI auto-negotiating\n");
}
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
@@ -3012,8 +3005,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pci_enable_device(pdev);
if (rc < 0) {
- if (netif_msg_probe(tp))
- dev_err(&pdev->dev, "enable failure\n");
+ netif_err(tp, probe, dev, "enable failure\n");
goto err_out_free_dev_1;
}
@@ -3023,29 +3015,24 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* make sure PCI base addr 1 is MMIO */
if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
- if (netif_msg_probe(tp)) {
- dev_err(&pdev->dev,
- "region #%d not an MMIO resource, aborting\n",
- region);
- }
+ netif_err(tp, probe, dev,
+ "region #%d not an MMIO resource, aborting\n",
+ region);
rc = -ENODEV;
goto err_out_mwi_3;
}
/* check for weird/broken PCI region reporting */
if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
- if (netif_msg_probe(tp)) {
- dev_err(&pdev->dev,
- "Invalid PCI region size(s), aborting\n");
- }
+ netif_err(tp, probe, dev,
+ "Invalid PCI region size(s), aborting\n");
rc = -ENODEV;
goto err_out_mwi_3;
}
rc = pci_request_regions(pdev, MODULENAME);
if (rc < 0) {
- if (netif_msg_probe(tp))
- dev_err(&pdev->dev, "could not request regions.\n");
+ netif_err(tp, probe, dev, "could not request regions\n");
goto err_out_mwi_3;
}
@@ -3058,10 +3045,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc < 0) {
- if (netif_msg_probe(tp)) {
- dev_err(&pdev->dev,
- "DMA configuration failed.\n");
- }
+ netif_err(tp, probe, dev, "DMA configuration failed\n");
goto err_out_free_res_4;
}
}
@@ -3069,15 +3053,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* ioremap MMIO region */
ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
if (!ioaddr) {
- if (netif_msg_probe(tp))
- dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
+ netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
rc = -EIO;
goto err_out_free_res_4;
}
tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (!tp->pcie_cap && netif_msg_probe(tp))
- dev_info(&pdev->dev, "no PCI Express capability\n");
+ if (!tp->pcie_cap)
+ netif_info(tp, probe, dev, "no PCI Express capability\n");
RTL_W16(IntrMask, 0x0000);
@@ -3100,10 +3083,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Use appropriate default if unknown */
if (tp->mac_version == RTL_GIGA_MAC_NONE) {
- if (netif_msg_probe(tp)) {
- dev_notice(&pdev->dev,
- "unknown MAC, using family default\n");
- }
+ netif_notice(tp, probe, dev,
+ "unknown MAC, using family default\n");
tp->mac_version = cfg->default_ver;
}
@@ -3185,19 +3166,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
- if (netif_msg_probe(tp)) {
- u32 xid = RTL_R32(TxConfig) & 0x9cf0f8ff;
-
- printk(KERN_INFO "%s: %s at 0x%lx, "
- "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
- "XID %08x IRQ %d\n",
- dev->name,
- rtl_chip_info[tp->chipset].name,
- dev->base_addr,
- dev->dev_addr[0], dev->dev_addr[1],
- dev->dev_addr[2], dev->dev_addr[3],
- dev->dev_addr[4], dev->dev_addr[5], xid, dev->irq);
- }
+ netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n",
+ rtl_chip_info[tp->chipset].name,
+ dev->base_addr, dev->dev_addr,
+ (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
rtl8169_init_phy(dev, tp);
@@ -4136,10 +4108,10 @@ static void rtl8169_reinit_task(struct work_struct *work)
ret = rtl8169_open(dev);
if (unlikely(ret < 0)) {
- if (net_ratelimit() && netif_msg_drv(tp)) {
- printk(KERN_ERR PFX "%s: reinit failure (status = %d)."
- " Rescheduling.\n", dev->name, ret);
- }
+ if (net_ratelimit())
+ netif_err(tp, drv, dev,
+ "reinit failure (status = %d). Rescheduling\n",
+ ret);
rtl8169_schedule_work(dev, rtl8169_reinit_task);
}
@@ -4169,10 +4141,8 @@ static void rtl8169_reset_task(struct work_struct *work)
netif_wake_queue(dev);
rtl8169_check_link_status(dev, tp, tp->mmio_addr);
} else {
- if (net_ratelimit() && netif_msg_intr(tp)) {
- printk(KERN_EMERG PFX "%s: Rx buffers shortage\n",
- dev->name);
- }
+ if (net_ratelimit())
+ netif_emerg(tp, intr, dev, "Rx buffers shortage\n");
rtl8169_schedule_work(dev, rtl8169_reset_task);
}
@@ -4260,11 +4230,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
u32 opts1;
if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
- if (netif_msg_drv(tp)) {
- printk(KERN_ERR
- "%s: BUG! Tx Ring full when queue awake!\n",
- dev->name);
- }
+ netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
goto err_stop;
}
@@ -4326,11 +4292,8 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
pci_read_config_word(pdev, PCI_STATUS, &pci_status);
- if (netif_msg_intr(tp)) {
- printk(KERN_ERR
- "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n",
- dev->name, pci_cmd, pci_status);
- }
+ netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
+ pci_cmd, pci_status);
/*
* The recovery sequence below admits a very elaborated explanation:
@@ -4354,8 +4317,7 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
/* The infamous DAC f*ckup only happens at boot time */
if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
- if (netif_msg_intr(tp))
- printk(KERN_INFO "%s: disabling PCI DAC.\n", dev->name);
+ netif_info(tp, intr, dev, "disabling PCI DAC\n");
tp->cp_cmd &= ~PCIDAC;
RTL_W16(CPlusCmd, tp->cp_cmd);
dev->features &= ~NETIF_F_HIGHDMA;
@@ -4482,11 +4444,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
if (status & DescOwn)
break;
if (unlikely(status & RxRES)) {
- if (netif_msg_rx_err(tp)) {
- printk(KERN_INFO
- "%s: Rx ERROR. status = %08x\n",
- dev->name, status);
- }
+ netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
+ status);
dev->stats.rx_errors++;
if (status & (RxRWT | RxRUNT))
dev->stats.rx_length_errors++;
@@ -4549,8 +4508,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
tp->cur_rx = cur_rx;
delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
- if (!delta && count && netif_msg_intr(tp))
- printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
+ if (!delta && count)
+ netif_info(tp, intr, dev, "no Rx buffer allocated\n");
tp->dirty_rx += delta;
/*
@@ -4560,8 +4519,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
* after refill ?
* - how do others driver handle this condition (Uh oh...).
*/
- if ((tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) && netif_msg_intr(tp))
- printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name);
+ if (tp->dirty_rx + NUM_RX_DESC == tp->cur_rx)
+ netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");
return count;
}
@@ -4616,10 +4575,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
if (likely(napi_schedule_prep(&tp->napi)))
__napi_schedule(&tp->napi);
- else if (netif_msg_intr(tp)) {
- printk(KERN_INFO "%s: interrupt %04x in poll\n",
- dev->name, status);
- }
+ else
+ netif_info(tp, intr, dev,
+ "interrupt %04x in poll\n", status);
}
/* We only get a new MSI interrupt when all active irq
@@ -4755,15 +4713,12 @@ static void rtl_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
/* Unconditionally log net taps. */
- if (netif_msg_link(tp)) {
- printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
- dev->name);
- }
+ netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
rx_mode =
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
@@ -4774,7 +4729,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
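The bulk of the r8169 churn above replaces open-coded "if (netif_msg_*(tp)) printk(...)" pairs with the netif_<level>() helpers from <linux/netdevice.h>. A rough sketch of the pattern being factored out, assuming only a private struct that carries the standard msg_enable bitmap; the struct and function names are illustrative:

#include <linux/netdevice.h>

struct example_priv {
	u32 msg_enable;		/* NETIF_MSG_* bitmap, typically set via ethtool */
};

static void report_link(struct example_priv *tp, struct net_device *dev, bool up)
{
	/* Old style: test the bitmap by hand and spell out the prefix. */
	if (netif_msg_link(tp))
		printk(KERN_INFO "%s: link %s\n", dev->name, up ? "up" : "down");

	/* New style: netif_info() folds in the test, the level and the name. */
	netif_info(tp, link, dev, "link %s\n", up ? "up" : "down");
}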
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index 1c257098d0a6..266baf534964 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1688,7 +1688,7 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
}
-static struct pci_device_id rr_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rr_pci_tbl) = {
{ PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0,}
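The rrunner change is one of many in this series that switch PCI ID tables to DEFINE_PCI_DEVICE_TABLE(). The macro lives in <linux/pci.h>; its exact expansion is version-dependent, but at the time it was roughly the following, i.e. a const table placed in the __devinitconst section:

/* Approximation only; the definition in <linux/pci.h> is authoritative. */
#define DEFINE_PCI_DEVICE_TABLE(_table) \
	const struct pci_device_id _table[] __devinitconst

static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	{ PCI_DEVICE(0x10b5, 0x0001) },	/* placeholder vendor/device IDs */
	{ 0, }
};
/* i.e. a const, __devinitconst-qualified struct pci_device_id array. */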
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 3c4836d0898f..102be16e9b52 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -523,7 +523,7 @@ module_param_array(rts_frm_len, uint, NULL, 0);
* S2IO device table.
* This table lists all the devices that this driver supports.
*/
-static struct pci_device_id s2io_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
PCI_ANY_ID, PCI_ANY_ID},
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
@@ -5055,8 +5055,8 @@ static void s2io_set_multicast(struct net_device *dev)
}
/* Update individual M_CAST address list */
- if ((!sp->m_cast_flg) && dev->mc_count) {
- if (dev->mc_count >
+ if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
+ if (netdev_mc_count(dev) >
(config->max_mc_addr - config->max_mac_addr)) {
DBG_PRINT(ERR_DBG,
"%s: No more Rx filters can be added - "
@@ -5066,7 +5066,7 @@ static void s2io_set_multicast(struct net_device *dev)
}
prev_cnt = sp->mc_addr_count;
- sp->mc_addr_count = dev->mc_count;
+ sp->mc_addr_count = netdev_mc_count(dev);
/* Clear out the previous list of Mc in the H/W. */
for (i = 0; i < prev_cnt; i++) {
@@ -5092,7 +5092,7 @@ static void s2io_set_multicast(struct net_device *dev)
}
/* Create the new Rx filter list and update the same in H/W. */
- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
ETH_ALEN);
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index e35050322f97..00ff8995ad69 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -429,7 +429,7 @@ static void _sc92031_set_mar(struct net_device *dev)
u32 mar0 = 0, mar1 = 0;
if ((dev->flags & IFF_PROMISC) ||
- dev->mc_count > multicast_filter_limit ||
+ netdev_mc_count(dev) > multicast_filter_limit ||
(dev->flags & IFF_ALLMULTI))
mar0 = mar1 = 0xffffffff;
else if (dev->flags & IFF_MULTICAST) {
@@ -1589,7 +1589,7 @@ out:
return 0;
}
-static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(sc92031_pci_device_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
{ PCI_DEVICE(0x1088, 0x2031) },
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 46997e177ee3..dc58d9fd0f32 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1615,7 +1615,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
memset(mc_hash, 0xff, sizeof(*mc_hash));
} else {
memset(mc_hash, 0x00, sizeof(*mc_hash));
- for (i = 0; i < net_dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(net_dev); i++) {
crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
set_bit_le(bit, mc_hash->byte);
@@ -1940,7 +1940,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
**************************************************************************/
/* PCI device ID table */
-static struct pci_device_id efx_pci_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
.driver_data = (unsigned long) &falcon_a1_nic_type},
{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index a615ac051530..7eff0a615cb3 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -79,8 +79,6 @@ extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
/* Global */
extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
-extern void efx_suspend(struct efx_nic *efx);
-extern void efx_resume(struct efx_nic *efx);
extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs,
int rx_usecs, bool rx_adaptive);
extern int efx_request_power(struct efx_nic *efx, int mw, const char *name);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 6c0bbed8c477..d9f9c02a928e 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -196,7 +196,7 @@ int efx_ethtool_get_settings(struct net_device *net_dev,
efx->phy_op->get_settings(efx, ecmd);
mutex_unlock(&efx->mac_lock);
- /* Falcon GMAC does not support 1000Mbps HD */
+ /* GMAC does not support 1000Mbps HD */
ecmd->supported &= ~SUPPORTED_1000baseT_Half;
/* Both MACs support pause frames (bidirectional and respond-only) */
ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
@@ -216,7 +216,7 @@ int efx_ethtool_set_settings(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
- /* Falcon GMAC does not support 1000Mbps HD */
+ /* GMAC does not support 1000Mbps HD */
if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) {
EFX_LOG(efx, "rejecting unsupported 1000Mbps HD"
" setting\n");
@@ -342,8 +342,8 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
unsigned int n = 0, i;
enum efx_loopback_mode mode;
- efx_fill_test(n++, strings, data, &tests->mdio,
- "core", 0, "mdio", NULL);
+ efx_fill_test(n++, strings, data, &tests->phy_alive,
+ "phy", 0, "alive", NULL);
efx_fill_test(n++, strings, data, &tests->nvram,
"core", 0, "nvram", NULL);
efx_fill_test(n++, strings, data, &tests->interrupt,
@@ -379,7 +379,7 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
if (name == NULL)
break;
- efx_fill_test(n++, strings, data, &tests->phy[i],
+ efx_fill_test(n++, strings, data, &tests->phy_ext[i],
"phy", 0, name, NULL);
}
}
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 9d009c46e962..1b8d83657aaa 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -909,6 +909,8 @@ static int falcon_probe_port(struct efx_nic *efx)
efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
else
efx->wanted_fc = EFX_FC_RX;
+ if (efx->mdio.mmds & MDIO_DEVS_AN)
+ efx->wanted_fc |= EFX_FC_AUTO;
/* Allocate buffer for stats */
rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
@@ -1006,7 +1008,7 @@ static int falcon_test_nvram(struct efx_nic *efx)
static const struct efx_nic_register_test falcon_b0_register_tests[] = {
{ FR_AZ_ADR_REGION,
- EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
+ EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_AZ_RX_CFG,
EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
{ FR_AZ_TX_CFG,
@@ -1728,7 +1730,7 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
/**************************************************************************
*
- * Revision-dependent attributes used by efx.c
+ * Revision-dependent attributes used by efx.c and nic.c
*
**************************************************************************
*/
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 9f035b9f0350..86610db2cff5 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -896,29 +896,73 @@ fail:
return rc;
}
-int efx_mcdi_handle_assertion(struct efx_nic *efx)
+static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
+{
+ u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN];
+ u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN];
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+
+ switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
+ case MC_CMD_NVRAM_TEST_PASS:
+ case MC_CMD_NVRAM_TEST_NOTSUPP:
+ return 0;
+ default:
+ return -EIO;
+ }
+}
+
+int efx_mcdi_nvram_test_all(struct efx_nic *efx)
+{
+ u32 nvram_types;
+ unsigned int type;
+ int rc;
+
+ rc = efx_mcdi_nvram_types(efx, &nvram_types);
+ if (rc)
+ return rc;
+
+ type = 0;
+ while (nvram_types != 0) {
+ if (nvram_types & 1) {
+ rc = efx_mcdi_nvram_test(efx, type);
+ if (rc)
+ return rc;
+ }
+ type++;
+ nvram_types >>= 1;
+ }
+
+ return 0;
+}
+
+static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
- union {
- u8 asserts[MC_CMD_GET_ASSERTS_IN_LEN];
- u8 reboot[MC_CMD_REBOOT_IN_LEN];
- } inbuf;
- u8 assertion[MC_CMD_GET_ASSERTS_OUT_LEN];
+ u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN];
+ u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN];
unsigned int flags, index, ofst;
const char *reason;
size_t outlen;
int retry;
int rc;
- /* Check if the MC is in the assertion handler, retrying twice. Once
+ /* Attempt to read any stored assertion state before we reboot
+ * the mcfw out of the assertion handler. Retry twice, once
* because a boot-time assertion might cause this command to fail
* with EINTR. And once again because GET_ASSERTS can race with
* MC_CMD_REBOOT running on the other port. */
retry = 2;
do {
- MCDI_SET_DWORD(inbuf.asserts, GET_ASSERTS_IN_CLEAR, 0);
+ MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
- inbuf.asserts, MC_CMD_GET_ASSERTS_IN_LEN,
- assertion, sizeof(assertion), &outlen);
+ inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
+ outbuf, sizeof(outbuf), &outlen);
} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
if (rc)
@@ -926,21 +970,11 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
return -EINVAL;
- flags = MCDI_DWORD(assertion, GET_ASSERTS_OUT_GLOBAL_FLAGS);
+ /* Print out any recorded assertion state */
+ flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
return 0;
- /* Reset the hardware atomically such that only one port with succeed.
- * This command will succeed if a reboot is no longer required (because
- * the other port did it first), but fail with EIO if it succeeds.
- */
- BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
- MCDI_SET_DWORD(inbuf.reboot, REBOOT_IN_FLAGS,
- MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
- efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf.reboot, MC_CMD_REBOOT_IN_LEN,
- NULL, 0, NULL);
-
- /* Print out the assertion */
reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
? "system-level assertion"
: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
@@ -949,20 +983,45 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
? "watchdog reset"
: "unknown assertion";
EFX_ERR(efx, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
- MCDI_DWORD(assertion, GET_ASSERTS_OUT_SAVED_PC_OFFS),
- MCDI_DWORD(assertion, GET_ASSERTS_OUT_THREAD_OFFS));
+ MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
+ MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
/* Print out the registers */
ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
for (index = 1; index < 32; index++) {
EFX_ERR(efx, "R%.2d (?): 0x%.8x\n", index,
- MCDI_DWORD2(assertion, ofst));
+ MCDI_DWORD2(outbuf, ofst));
ofst += sizeof(efx_dword_t);
}
return 0;
}
+static void efx_mcdi_exit_assertion(struct efx_nic *efx)
+{
+ u8 inbuf[MC_CMD_REBOOT_IN_LEN];
+
+ /* Atomically reboot the mcfw out of the assertion handler */
+ BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
+ MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
+ efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
+ NULL, 0, NULL);
+}
+
+int efx_mcdi_handle_assertion(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_mcdi_read_assertion(efx);
+ if (rc)
+ return rc;
+
+ efx_mcdi_exit_assertion(efx);
+
+ return 0;
+}
+
void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN];
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h
index 10ce98f4c0fb..f1f89ad4075a 100644
--- a/drivers/net/sfc/mcdi.h
+++ b/drivers/net/sfc/mcdi.h
@@ -116,6 +116,7 @@ extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
loff_t offset, size_t length);
extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
unsigned int type);
+extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
extern int efx_mcdi_reset_port(struct efx_nic *efx);
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index 73e71f420624..bd59302695b3 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -786,16 +786,18 @@
#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
#define MC_CMD_GET_PHY_CFG_PRESENT_LBN 0
#define MC_CMD_GET_PHY_CFG_PRESENT_WIDTH 1
-#define MC_CMD_GET_PHY_CFG_SHORTBIST_LBN 1
-#define MC_CMD_GET_PHY_CFG_SHORTBIST_WIDTH 1
-#define MC_CMD_GET_PHY_CFG_LONGBIST_LBN 2
-#define MC_CMD_GET_PHY_CFG_LONGBIST_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN 1
+#define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN 2
+#define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_WIDTH 1
#define MC_CMD_GET_PHY_CFG_LOWPOWER_LBN 3
#define MC_CMD_GET_PHY_CFG_LOWPOWER_WIDTH 1
#define MC_CMD_GET_PHY_CFG_POWEROFF_LBN 4
#define MC_CMD_GET_PHY_CFG_POWEROFF_WIDTH 1
#define MC_CMD_GET_PHY_CFG_TXDIS_LBN 5
#define MC_CMD_GET_PHY_CFG_TXDIS_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_BIST_LBN 6
+#define MC_CMD_GET_PHY_CFG_BIST_WIDTH 1
#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
/* Bitmask of supported capabilities */
#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
@@ -832,7 +834,7 @@
#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
-/* MC_CMD_START_PHY_BIST:
+/* MC_CMD_START_BIST:
* Start a BIST test on the PHY.
*
* Locks required: PHY_LOCK if doing a PHY BIST
@@ -840,34 +842,71 @@
*/
#define MC_CMD_START_BIST 0x25
#define MC_CMD_START_BIST_IN_LEN 4
-#define MC_CMD_START_BIST_TYPE_OFST 0
+#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define MC_CMD_START_BIST_OUT_LEN 0
-/* Run the PHY's short BIST */
-#define MC_CMD_PHY_BIST_SHORT 1
-/* Run the PHY's long BIST */
-#define MC_CMD_PHY_BIST_LONG 2
+/* Run the PHY's short cable BIST */
+#define MC_CMD_PHY_BIST_CABLE_SHORT 1
+/* Run the PHY's long cable BIST */
+#define MC_CMD_PHY_BIST_CABLE_LONG 2
/* Run BIST on the currently selected BPX Serdes (XAUI or XFI) */
#define MC_CMD_BPX_SERDES_BIST 3
+/* Run the MC loopback tests */
+#define MC_CMD_MC_LOOPBACK_BIST 4
+/* Run the PHY's standard BIST */
+#define MC_CMD_PHY_BIST 5
/* MC_CMD_POLL_PHY_BIST: (variadic output)
* Poll for BIST completion
*
- * Returns a single status code, and a binary blob of phy-specific
- * bist output. If the driver can't succesfully parse the BIST output,
- * it should still respect the Pass/Fail in OUT.RESULT.
+ * Returns a single status code, and optionally some PHY-specific
+ * BIST output. The driver should only consume the BIST output
+ * after validating OUTLEN and PHY_CFG.PHY_TYPE.
*
- * Locks required: PHY_LOCK if doing a PHY BIST
+ * If a driver can't successfully parse the BIST output, it should
+ * still respect the pass/fail result in OUT.RESULT.
+ *
+ * Locks required: PHY_LOCK if doing a PHY BIST
* Return code: 0, EACCES (if PHY_LOCK is not held)
*/
#define MC_CMD_POLL_BIST 0x26
#define MC_CMD_POLL_BIST_IN_LEN 0
#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 40
+#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
#define MC_CMD_POLL_BIST_RUNNING 1
#define MC_CMD_POLL_BIST_PASSED 2
#define MC_CMD_POLL_BIST_FAILED 3
#define MC_CMD_POLL_BIST_TIMEOUT 4
+/* Generic: */
#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+/* SFT9001-specific: */
+/* (offset 4 unused?) */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 20
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 24
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 28
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 32
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 36
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 4
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 9
+/* mrsfp "PHY" driver: */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 1
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 2
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 3
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 4
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 5
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 6
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 7
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 8
/* MC_CMD_PHY_SPI: (variadic in, variadic out)
* Read/Write/Erase the PHY SPI device
@@ -1206,6 +1245,13 @@
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST \
(MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 178)
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST \
+ MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
+
#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
@@ -1216,7 +1262,8 @@
#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
#define MC_CMD_WOL_TYPE_BITMAP 0x5
-#define MC_CMD_WOL_TYPE_MAX 0x6
+#define MC_CMD_WOL_TYPE_LINK 0x6
+#define MC_CMD_WOL_TYPE_MAX 0x7
#define MC_CMD_FILTER_MODE_SIMPLE 0x0
#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff
@@ -1357,14 +1404,24 @@
* Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
*/
#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
-#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
/* MC_CMD_REBOOT:
- * Reboot the MC. The AFTER_ASSERTION flag is intended to be used
- * when the driver notices an assertion failure, to allow two ports to
- * both recover (semi-)gracefully.
+ * Reboot the MC.
+ *
+ * The AFTER_ASSERTION flag is intended to be used when the driver notices
+ * an assertion failure (at which point it is expected to perform a complete
+ * tear down and reinitialise), to allow both ports to reset the MC once
+ * in an atomic fashion.
+ *
+ * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1,
+ * which means that they will automatically reboot out of the assertion
+ * handler, so this is in practice an optional operation. It is still
+ * recommended that drivers execute this to support custom firmwares
+ * with REBOOT_ON_ASSERT=0.
*
* Locks required: NONE
* Returns: Nothing. You get back a response with ERR=1, DATALEN=0
@@ -1469,11 +1526,10 @@
((_ofst) + 6)
/* MC_CMD_READ_SENSORS
- * Returns the current (value, state) for each sensor
+ * Returns the current reading from each sensor
*
- * Returns the current (value, state) [each 16bit] of each sensor supported by
- * this board, by DMA'ing a sparse array (indexed by the sensor type) into host
- * memory.
+ * DMAs a sparse array of sensor readings (indexed by the sensor
+ * type) into host memory. Each array element is a dword.
*
* The MC will send a SENSOREVT event every time any sensor changes state. The
* driver is responsible for ensuring that it doesn't miss any events. The board
@@ -1486,6 +1542,12 @@
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
#define MC_CMD_READ_SENSORS_OUT_LEN 0
+/* Sensor reading fields */
+#define MC_CMD_READ_SENSOR_VALUE_LBN 0
+#define MC_CMD_READ_SENSOR_VALUE_WIDTH 16
+#define MC_CMD_READ_SENSOR_STATE_LBN 16
+#define MC_CMD_READ_SENSOR_STATE_WIDTH 8
+
/* MC_CMD_GET_PHY_STATE:
* Report current state of PHY. A "zombie" PHY is a PHY that has failed to
@@ -1577,4 +1639,98 @@
#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
+
+/* MC_CMD_TEST_ASSERT:
+ * Deliberately trigger an assert-detonation in the firmware for testing
+ * purposes (i.e. to test that the driver copes with it gracefully).
+ *
+ * Locks required: None
+ * Returns: 0
+ */
+
+#define MC_CMD_TESTASSERT 0x49
+#define MC_CMD_TESTASSERT_IN_LEN 0
+#define MC_CMD_TESTASSERT_OUT_LEN 0
+
+/* MC_CMD_WORKAROUND 0x4a
+ *
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it
+ * doesn't understand the given workaround number - which should not
+ * be treated as a hard error by client code.
+ *
+ * This op does not imply any semantics about each workaround; that's between
+ * the driver and the mcfw on a per-workaround basis.
+ *
+ * Locks required: None
+ * Returns: 0, EINVAL
+ */
+#define MC_CMD_WORKAROUND 0x4a
+#define MC_CMD_WORKAROUND_IN_LEN 8
+#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define MC_CMD_WORKAROUND_BUG17230 1
+#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+#define MC_CMD_WORKAROUND_OUT_LEN 0
+
+/* MC_CMD_GET_PHY_MEDIA_INFO:
+ * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
+ * SFP+ PHYs).
+ *
+ * The "media type" can be found via GET_PHY_CFG (GET_PHY_CFG_OUT_MEDIA_TYPE);
+ * the valid "page number" input values, and the output data, are interpreted
+ * on a per-type basis.
+ *
+ * For SFP+: PAGE=0 or 1 returns a 128-byte block read from module I2C address
+ * 0xA0 offset 0 or 0x80.
+ * Anything else: currently undefined.
+ *
+ * Locks required: None
+ * Return code: 0
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(_num_bytes) (4 + (_num_bytes))
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
+
+/* MC_CMD_NVRAM_TEST:
+ * Test a particular NVRAM partition for valid contents (where "valid"
+ * depends on the type of partition).
+ *
+ * Locks required: None
+ * Return code: 0
+ */
+#define MC_CMD_NVRAM_TEST 0x4c
+#define MC_CMD_NVRAM_TEST_IN_LEN 4
+#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_TEST_OUT_LEN 4
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+#define MC_CMD_NVRAM_TEST_PASS 0
+#define MC_CMD_NVRAM_TEST_FAIL 1
+#define MC_CMD_NVRAM_TEST_NOTSUPP 2
+
+/* MC_CMD_MRSFP_TWEAK: (debug)
+ * Read status and/or set parameters for the "mrsfp" driver in mr_rusty builds.
+ * I2C I/O expander bits are always read; if equaliser parameters are supplied,
+ * they are configured first.
+ *
+ * Locks required: None
+ * Return code: 0, EINVAL
+ */
+#define MC_CMD_MRSFP_TWEAK 0x4d
+#define MC_CMD_MRSFP_TWEAK_IN_LEN_READ_ONLY 0
+#define MC_CMD_MRSFP_TWEAK_IN_LEN_EQ_CONFIG 16
+#define MC_CMD_MRSFP_TWEAK_IN_TXEQ_LEVEL_OFST 0 /* 0-6 low->high de-emph. */
+#define MC_CMD_MRSFP_TWEAK_IN_TXEQ_DT_CFG_OFST 4 /* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_RXEQ_BOOST_OFST 8 /* 0-8 low->high boost */
+#define MC_CMD_MRSFP_TWEAK_IN_RXEQ_DT_CFG_OFST 12 /* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 /* input bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */
+
+/* Do NOT add new commands beyond 0x4f as part of 3.0: 0x50-0x7f will be
+ * used for post-3.0 extensions. If you run out of space, look for gaps or
+ * commands that are unused in the existing range. */
+
#endif /* MCDI_PCOL_H */
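The MC_CMD_READ_SENSOR_* fields added above pack a 16-bit value and an 8-bit state into every dword that MC_CMD_READ_SENSORS DMAs into the host buffer. A minimal sketch of splitting one element using only the LBN/WIDTH constants from this header; the element is treated as a plain u32 here, which glosses over the driver's efx_dword_t handling:

/* value occupies bits 0..15, state bits 16..23 (per the defines above) */
static inline void decode_sensor_entry(u32 entry, u16 *value, u8 *state)
{
	*value = (entry >> MC_CMD_READ_SENSOR_VALUE_LBN) &
		 ((1 << MC_CMD_READ_SENSOR_VALUE_WIDTH) - 1);
	*state = (entry >> MC_CMD_READ_SENSOR_STATE_LBN) &
		 ((1 << MC_CMD_READ_SENSOR_STATE_WIDTH) - 1);
}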
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index eb694af7a473..34c22fa986e2 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -381,6 +381,18 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
* but by convention we don't */
efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
+ /* Set the initial link mode */
+ efx_mcdi_phy_decode_link(
+ efx, &efx->link_state,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
+
+ /* Default to Autonegotiated flow control if the PHY supports it */
+ efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
+ if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ efx->wanted_fc |= EFX_FC_AUTO;
+
return 0;
fail:
@@ -436,7 +448,7 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
/* The link partner capabilities are only relevant if the
* link supports flow control autonegotiation */
- if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
return;
/* If flow control autoneg is supported and enabled, then fine */
@@ -560,6 +572,27 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
return 0;
}
+static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
+{
+ u8 outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN];
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
+ return -EMSGSIZE;
+ if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
struct efx_phy_operations efx_mcdi_phy_ops = {
.probe = efx_mcdi_phy_probe,
.init = efx_port_dummy_op_int,
@@ -569,6 +602,7 @@ struct efx_phy_operations efx_mcdi_phy_ops = {
.remove = efx_mcdi_phy_remove,
.get_settings = efx_mcdi_phy_get_settings,
.set_settings = efx_mcdi_phy_set_settings,
+ .test_alive = efx_mcdi_phy_test_alive,
.run_tests = NULL,
.test_name = NULL,
};
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 1574e52f0594..0548fcbbdcd0 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -335,3 +335,27 @@ enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx)
mii_advertise_flowctrl(efx->wanted_fc),
efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
}
+
+int efx_mdio_test_alive(struct efx_nic *efx)
+{
+ int rc;
+ int devad = __ffs(efx->mdio.mmds);
+ u16 physid1, physid2;
+
+ mutex_lock(&efx->mac_lock);
+
+ physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
+ physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);
+
+ if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
+ (physid2 == 0x0000) || (physid2 == 0xffff)) {
+ EFX_ERR(efx, "no MDIO PHY present with ID %d\n",
+ efx->mdio.prtad);
+ rc = -EINVAL;
+ } else {
+ rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
+ }
+
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index f6ac9503339d..f89e71929603 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -106,4 +106,7 @@ efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
mdio_set_flag(&efx->mdio, efx->mdio.prtad, devad, addr, mask, state);
}
+/* Liveness self-test for MDIO PHYs */
+extern int efx_mdio_test_alive(struct efx_nic *efx);
+
#endif /* EFX_MDIO_10G_H */
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index d5aab5b3fa06..cb018e272097 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -18,7 +18,6 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
-#include <linux/timer.h>
#include <linux/mdio.h>
#include <linux/list.h>
#include <linux/pci.h>
@@ -101,9 +100,6 @@ do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
* Special buffers are used for the event queues and the TX and RX
* descriptor queues for each channel. They are *not* used for the
* actual transmit and receive buffers.
- *
- * Note that for Falcon, TX and RX descriptor queues live in host memory.
- * Allocation and freeing procedures must take this into account.
*/
struct efx_special_buffer {
void *addr;
@@ -300,7 +296,7 @@ struct efx_rx_queue {
* @dma_addr: DMA base address of the buffer
* @len: Buffer length, in bytes
*
- * Falcon uses these buffers for its interrupt status registers and
+ * The NIC uses these buffers for its interrupt status registers and
* MAC stats dumps.
*/
struct efx_buffer {
@@ -516,8 +512,9 @@ struct efx_mac_operations {
* @set_settings: Set ethtool settings. Serialised by the mac_lock.
* @set_npage_adv: Set abilities advertised in (Extended) Next Page
* (only needed where AN bit is set in mmds)
+ * @test_alive: Test that PHY is 'alive' (online)
* @test_name: Get the name of a PHY-specific test/result
- * @run_tests: Run tests and record results as appropriate.
+ * @run_tests: Run tests and record results as appropriate (offline).
* Flags are the ethtool tests flags.
*/
struct efx_phy_operations {
@@ -532,6 +529,7 @@ struct efx_phy_operations {
int (*set_settings) (struct efx_nic *efx,
struct ethtool_cmd *ecmd);
void (*set_npage_adv) (struct efx_nic *efx, u32);
+ int (*test_alive) (struct efx_nic *efx);
const char *(*test_name) (struct efx_nic *efx, unsigned int index);
int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
};
@@ -672,7 +670,7 @@ union efx_multicast_hash {
* @irq_status: Interrupt status buffer
* @last_irq_cpu: Last CPU to handle interrupt.
* This register is written with the SMP processor ID whenever an
- * interrupt is handled. It is used by falcon_test_interrupt()
+ * interrupt is handled. It is used by efx_nic_test_interrupt()
* to verify that an interrupt has occurred.
* @spi_flash: SPI flash device
* This field will be %NULL if no flash device is present (or for Siena).
@@ -721,8 +719,7 @@ union efx_multicast_hash {
* @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state
*
- * The @priv field of the corresponding &struct net_device points to
- * this.
+ * This is stored in the private area of the &struct net_device.
*/
struct efx_nic {
char name[IFNAMSIZ];
@@ -995,7 +992,7 @@ static inline void clear_bit_le(unsigned nr, unsigned char *addr)
* that the net driver will program into the MAC as the maximum frame
* length.
*
- * The 10G MAC used in Falcon requires 8-byte alignment on the frame
+ * The 10G MAC requires 8-byte alignment on the frame
* length, so we round up to the nearest 8.
*
* Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index db44224ed2ca..b06f8e348307 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -623,10 +623,6 @@ void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
*
* This writes the EVQ_RPTR_REG register for the specified channel's
* event queue.
- *
- * Note that EVQ_RPTR_REG contains the index of the "last read" event,
- * whereas channel->eventq_read_ptr contains the index of the "next to
- * read" event.
*/
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
@@ -1384,6 +1380,15 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
efx->last_irq_cpu = raw_smp_processor_id();
EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+ } else if (EFX_WORKAROUND_15783(efx)) {
+ /* We can't return IRQ_HANDLED more than once on seeing ISR0=0
+ * because this might be a shared interrupt, but we do need to
+ * check the channel every time and preemptively rearm it if
+ * it's idle. */
+ efx_for_each_channel(channel, efx) {
+ if (!channel->work_pending)
+ efx_nic_eventq_read_ack(channel);
+ }
}
return result;
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index 67eec7a6e487..1bee62c83001 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -445,4 +445,5 @@ struct efx_phy_operations falcon_qt202x_phy_ops = {
.remove = qt202x_phy_remove,
.get_settings = qt202x_phy_get_settings,
.set_settings = efx_mdio_set_settings,
+ .test_alive = efx_mdio_test_alive,
};
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 250c8827b842..cf0139a7d9a4 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -24,9 +24,6 @@
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
-#include "spi.h"
-#include "io.h"
-#include "mdio_10g.h"
/*
* Loopback test packet structure
@@ -76,42 +73,15 @@ struct efx_loopback_state {
*
**************************************************************************/
-static int efx_test_mdio(struct efx_nic *efx, struct efx_self_tests *tests)
+static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
{
int rc = 0;
- int devad;
- u16 physid1, physid2;
-
- if (efx->mdio.mode_support & MDIO_SUPPORTS_C45)
- devad = __ffs(efx->mdio.mmds);
- else if (efx->mdio.mode_support & MDIO_SUPPORTS_C22)
- devad = MDIO_DEVAD_NONE;
- else
- return 0;
-
- mutex_lock(&efx->mac_lock);
- tests->mdio = -1;
-
- physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
- physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);
- if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
- (physid2 == 0x0000) || (physid2 == 0xffff)) {
- EFX_ERR(efx, "no MDIO PHY present with ID %d\n",
- efx->mdio.prtad);
- rc = -EINVAL;
- goto out;
+ if (efx->phy_op->test_alive) {
+ rc = efx->phy_op->test_alive(efx);
+ tests->phy_alive = rc ? -1 : 1;
}
- if (EFX_IS10G(efx)) {
- rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
- if (rc)
- goto out;
- }
-
-out:
- mutex_unlock(&efx->mac_lock);
- tests->mdio = rc ? -1 : 1;
return rc;
}
@@ -258,7 +228,7 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
return 0;
mutex_lock(&efx->mac_lock);
- rc = efx->phy_op->run_tests(efx, tests->phy, flags);
+ rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
mutex_unlock(&efx->mac_lock);
return rc;
}
@@ -684,7 +654,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
/* Online (i.e. non-disruptive) testing
* This checks interrupt generation, event delivery and PHY presence. */
- rc = efx_test_mdio(efx, tests);
+ rc = efx_test_phy_alive(efx, tests);
if (rc && !rc_test)
rc_test = rc;
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index f6feee04c96b..643bef72b99d 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -32,7 +32,7 @@ struct efx_loopback_self_tests {
*/
struct efx_self_tests {
/* online tests */
- int mdio;
+ int phy_alive;
int nvram;
int interrupt;
int eventq_dma[EFX_MAX_CHANNELS];
@@ -40,7 +40,7 @@ struct efx_self_tests {
int eventq_poll[EFX_MAX_CHANNELS];
/* offline tests */
int registers;
- int phy[EFX_MAX_PHY_TESTS];
+ int phy_ext[EFX_MAX_PHY_TESTS];
struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
};
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index f8c6771e66d8..1619fb5a64f5 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -106,16 +106,11 @@ static int siena_probe_port(struct efx_nic *efx)
efx->mdio.mdio_read = siena_mdio_read;
efx->mdio.mdio_write = siena_mdio_write;
- /* Fill out MDIO structure and loopback modes */
+ /* Fill out MDIO structure, loopback modes, and initial link state */
rc = efx->phy_op->probe(efx);
if (rc != 0)
return rc;
- /* Initial assumption */
- efx->link_state.speed = 10000;
- efx->link_state.fd = true;
- efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
-
/* Allocate buffer for stats */
rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
MC_CMD_MAC_NSTATS * sizeof(u64));
@@ -139,7 +134,7 @@ void siena_remove_port(struct efx_nic *efx)
static const struct efx_nic_register_test siena_register_tests[] = {
{ FR_AZ_ADR_REGION,
- EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
+ EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_CZ_USR_EV_CFG,
EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AZ_RX_CFG,
@@ -181,6 +176,12 @@ static int siena_test_registers(struct efx_nic *efx)
static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
{
+ int rc;
+
+ /* Recover from a failed assertion pre-reset */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
+ return rc;
if (method == RESET_TYPE_WORLD)
return efx_mcdi_reset_mc(efx);
@@ -582,6 +583,7 @@ struct efx_nic_type siena_a0_nic_type = {
.set_wol = siena_set_wol,
.resume_wol = siena_init_wol,
.test_registers = siena_test_registers,
+ .test_nvram = efx_mcdi_nvram_test_all,
.default_mac_ops = &efx_mcdi_mac_operations,
.revision = EFX_REV_SIENA_A0,
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 3009c297c135..10db071bd837 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -842,6 +842,7 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sfx7101_set_npage_adv,
+ .test_alive = efx_mdio_test_alive,
.test_name = sfx7101_test_name,
.run_tests = sfx7101_run_tests,
};
@@ -856,6 +857,7 @@ struct efx_phy_operations falcon_sft9001_phy_ops = {
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sft9001_set_npage_adv,
+ .test_alive = efx_mdio_test_alive,
.test_name = sft9001_test_name,
.run_tests = sft9001_run_tests,
};
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 6b364a6c6c60..ed999d31f1fa 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -660,7 +660,7 @@ static void sgiseeq_set_multicast(struct net_device *dev)
if(dev->flags & IFF_PROMISC)
sp->mode = SEEQ_RCMD_RANY;
- else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count)
+ else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
sp->mode = SEEQ_RCMD_RBMCAST;
else
sp->mode = SEEQ_RCMD_RBCAST;
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 7402b858cab7..42a35f086a9f 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -1473,13 +1473,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
if (ret)
goto out_unregister;
- /* pritnt device infomation */
- pr_info("Base address at 0x%x, ",
- (u32)ndev->base_addr);
-
- for (i = 0; i < 5; i++)
- printk("%02X:", ndev->dev_addr[i]);
- printk("%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);
+ /* print device information */
+ pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
+ (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
platform_set_drvdata(pdev, ndev);
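Like several hunks in this series, the sh_eth probe message above relies on the %pM printk extension instead of printing the address one octet at a time. A small before/after sketch; the print_mac_example() helper exists only for illustration:

#include <linux/kernel.h>

static void print_mac_example(const u8 *addr)	/* addr points at 6 bytes */
{
	/* Byte-by-byte, the style this series is removing: */
	pr_info("MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
		addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	/* Equivalent output via the %pM pointer extension: */
	pr_info("MAC %pM\n", addr);
}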
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 31233b4c44a0..8c4e38f9ebf7 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -334,7 +334,7 @@ static const struct {
{ "SiS 191 PCI Gigabit Ethernet adapter" },
};
-static struct pci_device_id sis190_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
{ 0, },
@@ -841,7 +841,7 @@ static void sis190_set_rx_mode(struct net_device *dev)
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
@@ -852,7 +852,7 @@ static void sis190_set_rx_mode(struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int bit_nr =
ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 7360d4bbf75e..32ae87c09f5e 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -106,7 +106,7 @@ static const char * card_names[] = {
"SiS 900 PCI Fast Ethernet",
"SiS 7016 PCI Fast Ethernet"
};
-static struct pci_device_id sis900_pci_tbl [] = {
+static DEFINE_PCI_DEVICE_TABLE(sis900_pci_tbl) = {
{PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_900,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_900},
{PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7016,
@@ -2288,7 +2288,7 @@ static void set_rx_mode(struct net_device *net_dev)
rx_mode = RFPromiscuous;
for (i = 0; i < table_entries; i++)
mc_filter[i] = 0xffff;
- } else if ((net_dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(net_dev) > multicast_filter_limit) ||
(net_dev->flags & IFF_ALLMULTI)) {
/* too many multicast addresses or accept all multicast packet */
rx_mode = RFAAB | RFAAM;
@@ -2301,7 +2301,7 @@ static void set_rx_mode(struct net_device *net_dev)
struct dev_mc_list *mclist;
rx_mode = RFAAB;
for (i = 0, mclist = net_dev->mc_list;
- mclist && i < net_dev->mc_count;
+ mclist && i < netdev_mc_count(net_dev);
i++, mclist = mclist->next) {
unsigned int bit_nr =
sis900_mcast_bitnr(mclist->dmi_addr, sis_priv->chipset_rev);
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index db216a728503..346adfae986f 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -149,7 +149,7 @@ extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
extern void mac_drv_clear_rx_queue(struct s_smc *smc);
extern void enable_tx_irq(struct s_smc *smc, u_short queue);
-static struct pci_device_id skfddi_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(skfddi_pci_tbl) = {
{ PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* Terminating entry */
};
@@ -435,13 +435,7 @@ static int skfp_driver_init(struct net_device *dev)
goto fail;
}
read_address(smc, NULL);
- pr_debug(KERN_INFO "HW-Addr: %02x %02x %02x %02x %02x %02x\n",
- smc->hw.fddi_canon_addr.a[0],
- smc->hw.fddi_canon_addr.a[1],
- smc->hw.fddi_canon_addr.a[2],
- smc->hw.fddi_canon_addr.a[3],
- smc->hw.fddi_canon_addr.a[4],
- smc->hw.fddi_canon_addr.a[5]);
+ pr_debug(KERN_INFO "HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
smt_reset_defaults(smc, 0);
@@ -878,27 +872,20 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
- } else if (dev->mc_count > 0) {
- if (dev->mc_count <= FPMAX_MULTICAST) {
+ } else if (!netdev_mc_empty(dev)) {
+ if (netdev_mc_count(dev) <= FPMAX_MULTICAST) {
/* use exact filtering */
// point to first multicast addr
dmi = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
mac_add_multicast(smc,
(struct fddi_addr *)dmi->dmi_addr,
1);
- pr_debug(KERN_INFO "ENABLE MC ADDRESS:");
- pr_debug(" %02x %02x %02x ",
- dmi->dmi_addr[0],
- dmi->dmi_addr[1],
- dmi->dmi_addr[2]);
- pr_debug("%02x %02x %02x\n",
- dmi->dmi_addr[3],
- dmi->dmi_addr[4],
- dmi->dmi_addr[5]);
+ pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n",
+ dmi->dmi_addr);
dmi = dmi->next;
} // for
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 379a3dc00163..ffa55df4d607 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -78,7 +78,7 @@ static int debug = -1; /* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-static const struct pci_device_id skge_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(skge_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) },
{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
@@ -1074,13 +1074,11 @@ static void skge_link_up(struct skge_port *skge)
netif_carrier_on(skge->netdev);
netif_wake_queue(skge->netdev);
- if (netif_msg_link(skge)) {
- printk(KERN_INFO PFX
- "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
- skge->netdev->name, skge->speed,
- skge->duplex == DUPLEX_FULL ? "full" : "half",
- skge_pause(skge->flow_status));
- }
+ netif_info(skge, link, skge->netdev,
+ "Link is up at %d Mbps, %s duplex, flow control %s\n",
+ skge->speed,
+ skge->duplex == DUPLEX_FULL ? "full" : "half",
+ skge_pause(skge->flow_status));
}
static void skge_link_down(struct skge_port *skge)
@@ -1089,8 +1087,7 @@ static void skge_link_down(struct skge_port *skge)
netif_carrier_off(skge->netdev);
netif_stop_queue(skge->netdev);
- if (netif_msg_link(skge))
- printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
+ netif_info(skge, link, skge->netdev, "Link is down\n");
}
@@ -1792,9 +1789,8 @@ static void genesis_mac_intr(struct skge_hw *hw, int port)
struct skge_port *skge = netdev_priv(dev);
u16 status = xm_read16(hw, port, XM_ISRC);
- if (netif_msg_intr(skge))
- printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
- dev->name, status);
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "mac interrupt status 0x%x\n", status);
if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) {
xm_link_down(hw, port);
@@ -1898,9 +1894,8 @@ static inline void bcom_phy_intr(struct skge_port *skge)
u16 isrc;
isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
- if (netif_msg_intr(skge))
- printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x\n",
- skge->netdev->name, isrc);
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "phy interrupt status 0x%x\n", isrc);
if (isrc & PHY_B_IS_PSE)
printk(KERN_ERR PFX "%s: uncorrectable pair swap error\n",
@@ -2298,9 +2293,8 @@ static void yukon_mac_intr(struct skge_hw *hw, int port)
struct skge_port *skge = netdev_priv(dev);
u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
- if (netif_msg_intr(skge))
- printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
- dev->name, status);
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "mac interrupt status 0x%x\n", status);
if (status & GM_IS_RX_FF_OR) {
++dev->stats.rx_fifo_errors;
@@ -2379,9 +2373,8 @@ static void yukon_phy_intr(struct skge_port *skge)
istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
- if (netif_msg_intr(skge))
- printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x 0x%x\n",
- skge->netdev->name, istatus, phystat);
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "phy interrupt status 0x%x 0x%x\n", istatus, phystat);
if (istatus & PHY_M_IS_AN_COMPL) {
if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
@@ -2571,8 +2564,7 @@ static int skge_up(struct net_device *dev)
if (!is_valid_ether_addr(dev->dev_addr))
return -EINVAL;
- if (netif_msg_ifup(skge))
- printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
+ netif_info(skge, ifup, skge->netdev, "enabling interface\n");
if (dev->mtu > RX_BUF_SIZE)
skge->rx_buf_size = dev->mtu + ETH_HLEN;
@@ -2670,8 +2662,7 @@ static int skge_down(struct net_device *dev)
if (skge->mem == NULL)
return 0;
- if (netif_msg_ifdown(skge))
- printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
+ netif_info(skge, ifdown, skge->netdev, "disabling interface\n");
netif_tx_disable(dev);
@@ -2825,9 +2816,9 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
- if (unlikely(netif_msg_tx_queued(skge)))
- printk(KERN_DEBUG "%s: tx queued, slot %td, len %d\n",
- dev->name, e - skge->tx_ring.start, skb->len);
+ netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
+ "tx queued, slot %td, len %d\n",
+ e - skge->tx_ring.start, skb->len);
skge->tx_ring.to_use = e->next;
smp_wmb();
@@ -2858,9 +2849,8 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
PCI_DMA_TODEVICE);
if (control & BMU_EOF) {
- if (unlikely(netif_msg_tx_done(skge)))
- printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
- skge->netdev->name, e - skge->tx_ring.start);
+ netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
+ "tx done slot %td\n", e - skge->tx_ring.start);
dev_kfree_skb(e->skb);
}
@@ -2885,8 +2875,7 @@ static void skge_tx_timeout(struct net_device *dev)
{
struct skge_port *skge = netdev_priv(dev);
- if (netif_msg_timer(skge))
- printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name);
+ netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n");
skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
skge_tx_clean(dev);
@@ -2932,7 +2921,7 @@ static void genesis_set_multicast(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
int port = skge->port;
- int i, count = dev->mc_count;
+ int i, count = netdev_mc_count(dev);
struct dev_mc_list *list = dev->mc_list;
u32 mode;
u8 filter[8];
@@ -2987,7 +2976,7 @@ static void yukon_set_multicast(struct net_device *dev)
reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
else if (dev->flags & IFF_ALLMULTI) /* all multicast */
memset(filter, 0xff, sizeof(filter));
- else if (dev->mc_count == 0 && !rx_pause)/* no multicast */
+ else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */
reg &= ~GM_RXCR_MCF_ENA;
else {
int i;
@@ -2996,7 +2985,7 @@ static void yukon_set_multicast(struct net_device *dev)
if (rx_pause)
yukon_add_filter(filter, pause_mc_addr);
- for (i = 0; list && i < dev->mc_count; i++, list = list->next)
+ for (i = 0; list && i < netdev_mc_count(dev); i++, list = list->next)
yukon_add_filter(filter, list->dmi_addr);
}
@@ -3054,10 +3043,9 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
struct sk_buff *skb;
u16 len = control & BMU_BBC;
- if (unlikely(netif_msg_rx_status(skge)))
- printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
- dev->name, e - skge->rx_ring.start,
- status, len);
+ netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev,
+ "rx slot %td status 0x%x len %d\n",
+ e - skge->rx_ring.start, status, len);
if (len > skge->rx_buf_size)
goto error;
@@ -3111,10 +3099,9 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
return skb;
error:
- if (netif_msg_rx_err(skge))
- printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n",
- dev->name, e - skge->rx_ring.start,
- control, status);
+ netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev,
+ "rx err, slot %td control 0x%x status 0x%x\n",
+ e - skge->rx_ring.start, control, status);
if (skge->hw->chip_id == CHIP_ID_GENESIS) {
if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
@@ -3885,9 +3872,7 @@ static void __devinit skge_show_addr(struct net_device *dev)
{
const struct skge_port *skge = netdev_priv(dev);
- if (netif_msg_probe(skge))
- printk(KERN_INFO PFX "%s: addr %pM\n",
- dev->name, dev->dev_addr);
+ netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
}
static int __devinit skge_probe(struct pci_dev *pdev,
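
The skge hunks above collapse the "if (netif_msg_xxx(skge)) printk(...)" pairs into netif_info()/netif_printk() calls, which fold the msg_enable test and the device-name prefix into one macro. The sketch below is a simplified userspace mock of that idea; the real helpers also take the net_device and message level as separate arguments.

/* Simplified mock, not the kernel's netdevice.h. */
#include <stdio.h>
#include <stdarg.h>

#define NETIF_MSG_LINK	0x0004		/* same bit value as the real enum */

struct mock_port {
	const char *name;
	unsigned int msg_enable;
};

static void netdev_info(const struct mock_port *p, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", p->name);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

/* Roughly: expand the message class into a msg_enable test, then print. */
#define netif_info(priv, type, fmt, ...)				\
	do {								\
		if ((priv)->msg_enable & NETIF_MSG_##type)		\
			netdev_info(priv, fmt, ##__VA_ARGS__);		\
	} while (0)

int main(void)
{
	struct mock_port port = { "eth0", NETIF_MSG_LINK };

	netif_info(&port, LINK, "Link is up at %d Mbps\n", 1000);
	return 0;
}
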
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 67249c3c9f50..a1198f141996 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -50,7 +50,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "1.26"
+#define DRV_VERSION "1.27"
#define PFX DRV_NAME " "
/*
@@ -251,6 +251,8 @@ static void sky2_power_on(struct sky2_hw *hw)
sky2_pci_write32(hw, PCI_CFG_REG_1, 0);
+ sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);
+
/* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */
reg = sky2_read32(hw, B2_GP_IO);
reg |= GLB_GPIO_STAT_RACE_DIS;
@@ -731,7 +733,6 @@ static void sky2_wol_init(struct sky2_port *sky2)
unsigned port = sky2->port;
enum flow_control save_mode;
u16 ctrl;
- u32 reg1;
/* Bring hardware out of reset */
sky2_write16(hw, B0_CTST, CS_RST_CLR);
@@ -782,14 +783,11 @@ static void sky2_wol_init(struct sky2_port *sky2)
ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
- /* Turn on legacy PCI-Express PME mode */
- reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
- reg1 |= PCI_Y2_PME_LEGACY;
- sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+ /* Disable PiG firmware */
+ sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF);
/* block receiver */
sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
-
}
static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
@@ -800,29 +798,15 @@ static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
hw->chip_rev != CHIP_REV_YU_EX_A0) ||
hw->chip_id >= CHIP_ID_YUKON_FE_P) {
/* Yukon-Extreme B0 and further Extreme devices */
- /* enable Store & Forward mode for TX */
-
- if (dev->mtu <= ETH_DATA_LEN)
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
- TX_JUMBO_DIS | TX_STFW_ENA);
-
- else
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
- TX_JUMBO_ENA| TX_STFW_ENA);
- } else {
- if (dev->mtu <= ETH_DATA_LEN)
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
- else {
- /* set Tx GMAC FIFO Almost Empty Threshold */
- sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
- (ECU_JUMBO_WM << 16) | ECU_AE_THR);
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
+ } else if (dev->mtu > ETH_DATA_LEN) {
+ /* set Tx GMAC FIFO Almost Empty Threshold */
+ sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
+ (ECU_JUMBO_WM << 16) | ECU_AE_THR);
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
-
- /* Can't do offload because of lack of store/forward */
- dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_ALL_CSUM);
- }
- }
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
+ } else
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
}
static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
@@ -1065,6 +1049,40 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
return le;
}
+static unsigned sky2_get_rx_threshold(struct sky2_port* sky2)
+{
+ unsigned size;
+
+ /* Space needed for frame data + headers rounded up */
+ size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
+
+ /* Stopping point for hardware truncation */
+ return (size - 8) / sizeof(u32);
+}
+
+static unsigned sky2_get_rx_data_size(struct sky2_port* sky2)
+{
+ struct rx_ring_info *re;
+ unsigned size;
+
+ /* Space needed for frame data + headers rounded up */
+ size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
+
+ sky2->rx_nfrags = size >> PAGE_SHIFT;
+ BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
+
+ /* Compute residue after pages */
+ size -= sky2->rx_nfrags << PAGE_SHIFT;
+
+ /* Optimize to handle small packets and headers */
+ if (size < copybreak)
+ size = copybreak;
+ if (size < ETH_HLEN)
+ size = ETH_HLEN;
+
+ return size;
+}
+
/* Build description to hardware for one receive segment */
static void sky2_rx_add(struct sky2_port *sky2, u8 op,
dma_addr_t map, unsigned len)
@@ -1103,18 +1121,39 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
int i;
re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(pdev, re->data_addr)))
- return -EIO;
+ if (pci_dma_mapping_error(pdev, re->data_addr))
+ goto mapping_error;
pci_unmap_len_set(re, data_size, size);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- re->frag_addr[i] = pci_map_page(pdev,
- skb_shinfo(skb)->frags[i].page,
- skb_shinfo(skb)->frags[i].page_offset,
- skb_shinfo(skb)->frags[i].size,
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ re->frag_addr[i] = pci_map_page(pdev, frag->page,
+ frag->page_offset,
+ frag->size,
PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(pdev, re->frag_addr[i]))
+ goto map_page_error;
+ }
return 0;
+
+map_page_error:
+ while (--i >= 0) {
+ pci_unmap_page(pdev, re->frag_addr[i],
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_FROMDEVICE);
+ }
+
+ pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
+ PCI_DMA_FROMDEVICE);
+
+mapping_error:
+ if (net_ratelimit())
+ dev_warn(&pdev->dev, "%s: rx mapping error\n",
+ skb->dev->name);
+ return -EIO;
}
static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
@@ -1324,8 +1363,32 @@ static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
}
+static int sky2_alloc_rx_skbs(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned i;
+
+ sky2->rx_data_size = sky2_get_rx_data_size(sky2);
+
+ /* Fill Rx ring */
+ for (i = 0; i < sky2->rx_pending; i++) {
+ struct rx_ring_info *re = sky2->rx_ring + i;
+
+ re->skb = sky2_rx_alloc(sky2);
+ if (!re->skb)
+ return -ENOMEM;
+
+ if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) {
+ dev_kfree_skb(re->skb);
+ re->skb = NULL;
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
/*
- * Allocate and setup receiver buffer pool.
+ * Setup receiver buffer pool.
* Normal case this ends up creating one list element for skb
* in the receive ring. Worst case if using large MTU and each
* allocation falls on a different 64 bit region, that results
@@ -1333,12 +1396,12 @@ static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
* One element is used for checksum enable/disable, and one
* extra to avoid wrap.
*/
-static int sky2_rx_start(struct sky2_port *sky2)
+static void sky2_rx_start(struct sky2_port *sky2)
{
struct sky2_hw *hw = sky2->hw;
struct rx_ring_info *re;
unsigned rxq = rxqaddr[sky2->port];
- unsigned i, size, thresh;
+ unsigned i, thresh;
sky2->rx_put = sky2->rx_next = 0;
sky2_qset(hw, rxq);
@@ -1359,40 +1422,9 @@ static int sky2_rx_start(struct sky2_port *sky2)
if (!(hw->flags & SKY2_HW_NEW_LE))
rx_set_checksum(sky2);
- /* Space needed for frame data + headers rounded up */
- size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
-
- /* Stopping point for hardware truncation */
- thresh = (size - 8) / sizeof(u32);
-
- sky2->rx_nfrags = size >> PAGE_SHIFT;
- BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
-
- /* Compute residue after pages */
- size -= sky2->rx_nfrags << PAGE_SHIFT;
-
- /* Optimize to handle small packets and headers */
- if (size < copybreak)
- size = copybreak;
- if (size < ETH_HLEN)
- size = ETH_HLEN;
-
- sky2->rx_data_size = size;
-
- /* Fill Rx ring */
+ /* submit Rx ring */
for (i = 0; i < sky2->rx_pending; i++) {
re = sky2->rx_ring + i;
-
- re->skb = sky2_rx_alloc(sky2);
- if (!re->skb)
- goto nomem;
-
- if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) {
- dev_kfree_skb(re->skb);
- re->skb = NULL;
- goto nomem;
- }
-
sky2_rx_submit(sky2, re);
}
@@ -1402,6 +1434,7 @@ static int sky2_rx_start(struct sky2_port *sky2)
* the register is limited to 9 bits, so if you do frames > 2052
* you better get the MTU right!
*/
+ thresh = sky2_get_rx_threshold(sky2);
if (thresh > 0x1ff)
sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
else {
@@ -1433,13 +1466,6 @@ static int sky2_rx_start(struct sky2_port *sky2)
sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST),
TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN);
}
-
-
-
- return 0;
-nomem:
- sky2_rx_clean(sky2);
- return -ENOMEM;
}
static int sky2_alloc_buffers(struct sky2_port *sky2)
@@ -1470,7 +1496,7 @@ static int sky2_alloc_buffers(struct sky2_port *sky2)
if (!sky2->rx_ring)
goto nomem;
- return 0;
+ return sky2_alloc_rx_skbs(sky2);
nomem:
return -ENOMEM;
}
@@ -1479,6 +1505,8 @@ static void sky2_free_buffers(struct sky2_port *sky2)
{
struct sky2_hw *hw = sky2->hw;
+ sky2_rx_clean(sky2);
+
if (sky2->rx_le) {
pci_free_consistent(hw->pdev, RX_LE_BYTES,
sky2->rx_le, sky2->rx_le_map);
@@ -1497,16 +1525,16 @@ static void sky2_free_buffers(struct sky2_port *sky2)
sky2->rx_ring = NULL;
}
-/* Bring up network interface. */
-static int sky2_up(struct net_device *dev)
+static void sky2_hw_up(struct sky2_port *sky2)
{
- struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
- u32 imask, ramsize;
- int cap, err;
+ u32 ramsize;
+ int cap;
struct net_device *otherdev = hw->dev[sky2->port^1];
+ tx_init(sky2);
+
/*
	 * On dual port PCI-X cards, there is a problem where status
* can be received out of order due to split transactions
@@ -1518,16 +1546,7 @@ static int sky2_up(struct net_device *dev)
cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
cmd &= ~PCI_X_CMD_MAX_SPLIT;
sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
-
- }
-
- netif_carrier_off(dev);
-
- err = sky2_alloc_buffers(sky2);
- if (err)
- goto err_out;
-
- tx_init(sky2);
+ }
sky2_mac_init(hw, port);
@@ -1536,7 +1555,7 @@ static int sky2_up(struct net_device *dev)
if (ramsize > 0) {
u32 rxspace;
- pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
+ pr_debug(PFX "%s: ram buffer %dK\n", sky2->netdev->name, ramsize);
if (ramsize < 16)
rxspace = ramsize / 2;
else
@@ -1568,18 +1587,33 @@ static int sky2_up(struct net_device *dev)
sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
#endif
- err = sky2_rx_start(sky2);
+ sky2_rx_start(sky2);
+}
+
+/* Bring up network interface. */
+static int sky2_up(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ u32 imask;
+ int err;
+
+ netif_carrier_off(dev);
+
+ err = sky2_alloc_buffers(sky2);
if (err)
goto err_out;
+ sky2_hw_up(sky2);
+
/* Enable interrupts from phy/mac for port */
imask = sky2_read32(hw, B0_IMSK);
imask |= portirq_msk[port];
sky2_write32(hw, B0_IMSK, imask);
sky2_read32(hw, B0_IMSK);
- if (netif_msg_ifup(sky2))
- printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
+ netif_info(sky2, ifup, dev, "enabling interface\n");
return 0;
@@ -1662,9 +1696,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
goto mapping_error;
slot = sky2->tx_prod;
- if (unlikely(netif_msg_tx_queued(sky2)))
- printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
- dev->name, slot, skb->len);
+ netif_printk(sky2, tx_queued, KERN_DEBUG, dev,
+ "tx queued, slot %u, len %d\n", slot, skb->len);
/* Send high bits if needed */
upper = upper_32_bits(mapping);
@@ -1829,9 +1862,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
sky2_tx_unmap(sky2->hw->pdev, re);
if (skb) {
- if (unlikely(netif_msg_tx_done(sky2)))
- printk(KERN_DEBUG "%s: tx done %u\n",
- dev->name, idx);
+ netif_printk(sky2, tx_done, KERN_DEBUG, dev,
+ "tx done %u\n", idx);
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
@@ -1845,10 +1877,6 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
sky2->tx_cons = idx;
smp_mb();
-
- /* Wake unless it's detached, and called e.g. from sky2_down() */
- if (tx_avail(sky2) > MAX_SKB_TX_LE + 4 && netif_device_present(dev))
- netif_wake_queue(dev);
}
static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
@@ -1873,21 +1901,11 @@ static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
}
-/* Network shutdown */
-static int sky2_down(struct net_device *dev)
+static void sky2_hw_down(struct sky2_port *sky2)
{
- struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
u16 ctrl;
- u32 imask;
-
- /* Never really got started! */
- if (!sky2->tx_le)
- return 0;
-
- if (netif_msg_ifdown(sky2))
- printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
/* Force flow control off */
sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
@@ -1920,15 +1938,6 @@ static int sky2_down(struct net_device *dev)
sky2_rx_stop(sky2);
- /* Disable port IRQ */
- imask = sky2_read32(hw, B0_IMSK);
- imask &= ~portirq_msk[port];
- sky2_write32(hw, B0_IMSK, imask);
- sky2_read32(hw, B0_IMSK);
-
- synchronize_irq(hw->pdev->irq);
- napi_synchronize(&hw->napi);
-
spin_lock_bh(&sky2->phy_lock);
sky2_phy_power_down(hw, port);
spin_unlock_bh(&sky2->phy_lock);
@@ -1937,8 +1946,29 @@ static int sky2_down(struct net_device *dev)
/* Free any pending frames stuck in HW queue */
sky2_tx_complete(sky2, sky2->tx_prod);
+}
- sky2_rx_clean(sky2);
+/* Network shutdown */
+static int sky2_down(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+
+ /* Never really got started! */
+ if (!sky2->tx_le)
+ return 0;
+
+ netif_info(sky2, ifdown, dev, "disabling interface\n");
+
+ /* Disable port IRQ */
+ sky2_write32(hw, B0_IMSK,
+ sky2_read32(hw, B0_IMSK) & ~portirq_msk[sky2->port]);
+ sky2_read32(hw, B0_IMSK);
+
+ synchronize_irq(hw->pdev->irq);
+ napi_synchronize(&hw->napi);
+
+ sky2_hw_down(sky2);
sky2_free_buffers(sky2);
@@ -1994,12 +2024,11 @@ static void sky2_link_up(struct sky2_port *sky2)
sky2_write8(hw, SK_REG(port, LNK_LED_REG),
LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
- if (netif_msg_link(sky2))
- printk(KERN_INFO PFX
- "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
- sky2->netdev->name, sky2->speed,
- sky2->duplex == DUPLEX_FULL ? "full" : "half",
- fc_name[sky2->flow_status]);
+ netif_info(sky2, link, sky2->netdev,
+ "Link is up at %d Mbps, %s duplex, flow control %s\n",
+ sky2->speed,
+ sky2->duplex == DUPLEX_FULL ? "full" : "half",
+ fc_name[sky2->flow_status]);
}
static void sky2_link_down(struct sky2_port *sky2)
@@ -2019,8 +2048,7 @@ static void sky2_link_down(struct sky2_port *sky2)
/* Turn off link LED */
sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
- if (netif_msg_link(sky2))
- printk(KERN_INFO PFX "%s: Link is down.\n", sky2->netdev->name);
+ netif_info(sky2, link, sky2->netdev, "Link is down\n");
sky2_phy_init(hw, port);
}
@@ -2110,9 +2138,8 @@ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
- if (netif_msg_intr(sky2))
- printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
- sky2->netdev->name, istatus, phystat);
+ netif_info(sky2, intr, sky2->netdev, "phy interrupt status 0x%x 0x%x\n",
+ istatus, phystat);
if (istatus & PHY_M_IS_AN_COMPL) {
if (sky2_autoneg_done(sky2, phystat) == 0)
@@ -2166,8 +2193,7 @@ static void sky2_tx_timeout(struct net_device *dev)
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
- if (netif_msg_timer(sky2))
- printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
+ netif_err(sky2, timer, dev, "tx timeout\n");
printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n",
dev->name, sky2->tx_cons, sky2->tx_prod,
@@ -2187,14 +2213,20 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
u16 ctl, mode;
u32 imask;
+ /* MTU size outside the spec */
if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
return -EINVAL;
+ /* MTU > 1500 on yukon FE and FE+ not allowed */
if (new_mtu > ETH_DATA_LEN &&
(hw->chip_id == CHIP_ID_YUKON_FE ||
hw->chip_id == CHIP_ID_YUKON_FE_P))
return -EINVAL;
+ /* TSO, etc on Yukon Ultra and MTU > 1500 not supported */
+ if (new_mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U)
+ dev->features &= ~(NETIF_F_TSO|NETIF_F_SG|NETIF_F_ALL_CSUM);
+
if (!netif_running(dev)) {
dev->mtu = new_mtu;
return 0;
@@ -2229,7 +2261,11 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);
- err = sky2_rx_start(sky2);
+ err = sky2_alloc_rx_skbs(sky2);
+ if (!err)
+ sky2_rx_start(sky2);
+ else
+ sky2_rx_clean(sky2);
sky2_write32(hw, B0_IMSK, imask);
sky2_read32(hw, B0_Y2_SP_LISR);
@@ -2306,30 +2342,32 @@ static struct sk_buff *receive_new(struct sky2_port *sky2,
struct rx_ring_info *re,
unsigned int length)
{
- struct sk_buff *skb, *nskb;
+ struct sk_buff *skb;
+ struct rx_ring_info nre;
unsigned hdr_space = sky2->rx_data_size;
- /* Don't be tricky about reusing pages (yet) */
- nskb = sky2_rx_alloc(sky2);
- if (unlikely(!nskb))
- return NULL;
+ nre.skb = sky2_rx_alloc(sky2);
+ if (unlikely(!nre.skb))
+ goto nobuf;
+
+ if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space))
+ goto nomap;
skb = re->skb;
sky2_rx_unmap_skb(sky2->hw->pdev, re);
-
prefetch(skb->data);
- re->skb = nskb;
- if (sky2_rx_map_skb(sky2->hw->pdev, re, hdr_space)) {
- dev_kfree_skb(nskb);
- re->skb = skb;
- return NULL;
- }
+ *re = nre;
if (skb_shinfo(skb)->nr_frags)
skb_put_frags(skb, hdr_space, length);
else
skb_put(skb, length);
return skb;
+
+nomap:
+ dev_kfree_skb(nre.skb);
+nobuf:
+ return NULL;
}
/*
@@ -2350,9 +2388,9 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
count -= VLAN_HLEN;
#endif
- if (unlikely(netif_msg_rx_status(sky2)))
- printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
- dev->name, sky2->rx_next, status, length);
+ netif_printk(sky2, rx_status, KERN_DEBUG, dev,
+ "rx slot %u status 0x%x len %d\n",
+ sky2->rx_next, status, length);
sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
prefetch(sky2->rx_ring + sky2->rx_next);
@@ -2381,6 +2419,9 @@ okay:
skb = receive_copy(sky2, re, length);
else
skb = receive_new(sky2, re, length);
+
+ dev->stats.rx_dropped += (skb == NULL);
+
resubmit:
sky2_rx_submit(sky2, re);
@@ -2390,9 +2431,10 @@ len_error:
/* Truncation of overlength packets
causes PHY length to not match MAC length */
++dev->stats.rx_length_errors;
- if (netif_msg_rx_err(sky2) && net_ratelimit())
- pr_info(PFX "%s: rx length error: status %#x length %d\n",
- dev->name, status, length);
+ if (net_ratelimit())
+ netif_info(sky2, rx_err, dev,
+ "rx length error: status %#x length %d\n",
+ status, length);
goto resubmit;
error:
@@ -2402,9 +2444,9 @@ error:
goto resubmit;
}
- if (netif_msg_rx_err(sky2) && net_ratelimit())
- printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
- dev->name, status, length);
+ if (net_ratelimit())
+ netif_info(sky2, rx_err, dev,
+ "rx error, status 0x%x length %d\n", status, length);
if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
dev->stats.rx_length_errors++;
@@ -2421,8 +2463,13 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
{
struct sky2_port *sky2 = netdev_priv(dev);
- if (netif_running(dev))
+ if (netif_running(dev)) {
sky2_tx_complete(sky2, last);
+
+ /* Wake unless it's detached, and called e.g. from sky2_down() */
+ if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
+ netif_wake_queue(dev);
+ }
}
static inline void sky2_skb_rx(const struct sky2_port *sky2,
@@ -2458,6 +2505,32 @@ static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
}
}
+static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
+{
+	/* If this happens, the driver is assuming the wrong format for this chip type */
+ BUG_ON(sky2->hw->flags & SKY2_HW_NEW_LE);
+
+ /* Both checksum counters are programmed to start at
+ * the same offset, so unless there is a problem they
+ * should match. This failure is an early indication that
+ * hardware receive checksumming won't work.
+ */
+ if (likely((u16)(status >> 16) == (u16)status)) {
+ struct sk_buff *skb = sky2->rx_ring[sky2->rx_next].skb;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = le16_to_cpu(status);
+ } else {
+ dev_notice(&sky2->hw->pdev->dev,
+ "%s: receive checksum problem (status = %#x)\n",
+ sky2->netdev->name, status);
+
+ /* Disable checksum offload */
+ sky2->flags &= ~SKY2_FLAG_RX_CHECKSUM;
+ sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ BMU_DIS_RX_CHKSUM);
+ }
+}
+
/* Process status response ring */
static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
{
@@ -2492,11 +2565,10 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
case OP_RXSTAT:
total_packets[port]++;
total_bytes[port] += length;
+
skb = sky2_receive(dev, length, status);
- if (unlikely(!skb)) {
- dev->stats.rx_dropped++;
+ if (!skb)
break;
- }
/* This chip reports checksum status differently */
if (hw->flags & SKY2_HW_NEW_LE) {
@@ -2527,37 +2599,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
/* fall through */
#endif
case OP_RXCHKS:
- if (!(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
- break;
-
- /* If this happens then driver assuming wrong format */
- if (unlikely(hw->flags & SKY2_HW_NEW_LE)) {
- if (net_ratelimit())
- printk(KERN_NOTICE "%s: unexpected"
- " checksum status\n",
- dev->name);
- break;
- }
-
- /* Both checksum counters are programmed to start at
- * the same offset, so unless there is a problem they
- * should match. This failure is an early indication that
- * hardware receive checksumming won't work.
- */
- if (likely(status >> 16 == (status & 0xffff))) {
- skb = sky2->rx_ring[sky2->rx_next].skb;
- skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = le16_to_cpu(status);
- } else {
- printk(KERN_NOTICE PFX "%s: hardware receive "
- "checksum problem (status = %#x)\n",
- dev->name, status);
- sky2->flags &= ~SKY2_FLAG_RX_CHECKSUM;
-
- sky2_write32(sky2->hw,
- Q_ADDR(rxqaddr[port], Q_CSR),
- BMU_DIS_RX_CHKSUM);
- }
+ if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
+ sky2_rx_checksum(sky2, status);
break;
case OP_TXINDEXLE:
@@ -2683,9 +2726,7 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
struct sky2_port *sky2 = netdev_priv(dev);
u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
- if (netif_msg_intr(sky2))
- printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n",
- dev->name, status);
+ netif_info(sky2, intr, dev, "mac interrupt status 0x%x\n", status);
if (status & GM_IS_RX_CO_OV)
gma_read16(hw, port, GM_RX_IRQ_SRC);
@@ -3010,11 +3051,20 @@ static void sky2_reset(struct sky2_hw *hw)
u32 hwe_mask = Y2_HWE_ALL_MASK;
/* disable ASF */
- if (hw->chip_id == CHIP_ID_YUKON_EX) {
+ if (hw->chip_id == CHIP_ID_YUKON_EX
+ || hw->chip_id == CHIP_ID_YUKON_SUPR) {
+ sky2_write32(hw, CPU_WDOG, 0);
status = sky2_read16(hw, HCU_CCSR);
status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
HCU_CCSR_UC_STATE_MSK);
+ /*
+ * CPU clock divider shouldn't be used because
+ * - ASF firmware may malfunction
+ * - Yukon-Supreme: Parallel FLASH doesn't support divided clocks
+ */
+ status &= ~HCU_CCSR_CPU_CLK_DIVIDE_MSK;
sky2_write16(hw, HCU_CCSR, status);
+ sky2_write32(hw, CPU_WDOG, 0);
} else
sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
@@ -3097,7 +3147,7 @@ static void sky2_reset(struct sky2_hw *hw)
/* check if PSMv2 was running before */
reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
if (reg & PCI_EXP_LNKCTL_ASPMC) {
- int cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
/* restore the PCIe Link Control register */
sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
}
@@ -3188,7 +3238,9 @@ static void sky2_reset(struct sky2_hw *hw)
static void sky2_detach(struct net_device *dev)
{
if (netif_running(dev)) {
+ netif_tx_lock(dev);
netif_device_detach(dev); /* stop txq */
+ netif_tx_unlock(dev);
sky2_down(dev);
}
}
@@ -3216,48 +3268,53 @@ static int sky2_reattach(struct net_device *dev)
static void sky2_restart(struct work_struct *work)
{
struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
+ u32 imask;
int i;
rtnl_lock();
- for (i = 0; i < hw->ports; i++)
- sky2_detach(hw->dev[i]);
napi_disable(&hw->napi);
+ synchronize_irq(hw->pdev->irq);
+ imask = sky2_read32(hw, B0_IMSK);
sky2_write32(hw, B0_IMSK, 0);
- sky2_reset(hw);
- sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
- napi_enable(&hw->napi);
- for (i = 0; i < hw->ports; i++)
- sky2_reattach(hw->dev[i]);
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ struct sky2_port *sky2 = netdev_priv(dev);
- rtnl_unlock();
-}
+ if (!netif_running(dev))
+ continue;
-static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
-{
- return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
-}
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+ sky2_hw_down(sky2);
+ }
-static void sky2_hw_set_wol(struct sky2_hw *hw)
-{
- int wol = 0;
- int i;
+ sky2_reset(hw);
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
struct sky2_port *sky2 = netdev_priv(dev);
- if (sky2->wol)
- wol = 1;
+ if (!netif_running(dev))
+ continue;
+
+ sky2_hw_up(sky2);
+ netif_wake_queue(dev);
}
- if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
- hw->chip_id == CHIP_ID_YUKON_EX ||
- hw->chip_id == CHIP_ID_YUKON_FE_P)
- sky2_write32(hw, B0_CTST, wol ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
+ sky2_write32(hw, B0_IMSK, imask);
+ sky2_read32(hw, B0_IMSK);
- device_set_wakeup_enable(&hw->pdev->dev, wol);
+ sky2_read32(hw, B0_Y2_SP_LISR);
+ napi_enable(&hw->napi);
+
+ rtnl_unlock();
+}
+
+static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
+{
+ return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
}
static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -3278,11 +3335,6 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
sky2->wol = wol->wolopts;
-
- sky2_hw_set_wol(hw);
-
- if (!netif_running(dev))
- sky2_wol_init(sky2);
return 0;
}
@@ -3593,7 +3645,7 @@ static void sky2_set_multicast(struct net_device *dev)
reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
else if (dev->flags & IFF_ALLMULTI)
memset(filter, 0xff, sizeof(filter));
- else if (dev->mc_count == 0 && !rx_pause)
+ else if (netdev_mc_empty(dev) && !rx_pause)
reg &= ~GM_RXCR_MCF_ENA;
else {
int i;
@@ -3602,7 +3654,7 @@ static void sky2_set_multicast(struct net_device *dev)
if (rx_pause)
sky2_add_filter(filter, pause_mc_addr);
- for (i = 0; list && i < dev->mc_count; i++, list = list->next)
+ for (i = 0; list && i < netdev_mc_count(dev); i++, list = list->next)
sky2_add_filter(filter, list->dmi_addr);
}
@@ -3864,6 +3916,50 @@ static int sky2_get_regs_len(struct net_device *dev)
return 0x4000;
}
+static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b)
+{
+	/* This complicated switch statement is needed to make sure we
+	 * only access regions that are unreserved.
+ * Some blocks are only valid on dual port cards.
+ */
+ switch (b) {
+ /* second port */
+ case 5: /* Tx Arbiter 2 */
+ case 9: /* RX2 */
+ case 14 ... 15: /* TX2 */
+ case 17: case 19: /* Ram Buffer 2 */
+ case 22 ... 23: /* Tx Ram Buffer 2 */
+ case 25: /* Rx MAC Fifo 1 */
+ case 27: /* Tx MAC Fifo 2 */
+ case 31: /* GPHY 2 */
+ case 40 ... 47: /* Pattern Ram 2 */
+ case 52: case 54: /* TCP Segmentation 2 */
+ case 112 ... 116: /* GMAC 2 */
+ return hw->ports > 1;
+
+ case 0: /* Control */
+ case 2: /* Mac address */
+ case 4: /* Tx Arbiter 1 */
+ case 7: /* PCI express reg */
+ case 8: /* RX1 */
+ case 12 ... 13: /* TX1 */
+ case 16: case 18:/* Rx Ram Buffer 1 */
+ case 20 ... 21: /* Tx Ram Buffer 1 */
+ case 24: /* Rx MAC Fifo 1 */
+ case 26: /* Tx MAC Fifo 1 */
+ case 28 ... 29: /* Descriptor and status unit */
+ case 30: /* GPHY 1*/
+ case 32 ... 39: /* Pattern Ram 1 */
+ case 48: case 50: /* TCP Segmentation 1 */
+ case 56 ... 60: /* PCI space */
+ case 80 ... 84: /* GMAC 1 */
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
/*
* Returns copy of control register region
* Note: ethtool_get_regs always provides full size (16k) buffer
@@ -3878,55 +3974,13 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs->version = 1;
for (b = 0; b < 128; b++) {
- /* This complicated switch statement is to make sure and
- * only access regions that are unreserved.
- * Some blocks are only valid on dual port cards.
- * and block 3 has some special diagnostic registers that
- * are poison.
- */
- switch (b) {
- case 3:
- /* skip diagnostic ram region */
+ /* skip poisonous diagnostic ram region in block 3 */
+ if (b == 3)
memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
- break;
-
- /* dual port cards only */
- case 5: /* Tx Arbiter 2 */
- case 9: /* RX2 */
- case 14 ... 15: /* TX2 */
- case 17: case 19: /* Ram Buffer 2 */
- case 22 ... 23: /* Tx Ram Buffer 2 */
- case 25: /* Rx MAC Fifo 1 */
- case 27: /* Tx MAC Fifo 2 */
- case 31: /* GPHY 2 */
- case 40 ... 47: /* Pattern Ram 2 */
- case 52: case 54: /* TCP Segmentation 2 */
- case 112 ... 116: /* GMAC 2 */
- if (sky2->hw->ports == 1)
- goto reserved;
- /* fall through */
- case 0: /* Control */
- case 2: /* Mac address */
- case 4: /* Tx Arbiter 1 */
- case 7: /* PCI express reg */
- case 8: /* RX1 */
- case 12 ... 13: /* TX1 */
- case 16: case 18:/* Rx Ram Buffer 1 */
- case 20 ... 21: /* Tx Ram Buffer 1 */
- case 24: /* Rx MAC Fifo 1 */
- case 26: /* Tx MAC Fifo 1 */
- case 28 ... 29: /* Descriptor and status unit */
- case 30: /* GPHY 1*/
- case 32 ... 39: /* Pattern Ram 1 */
- case 48: case 50: /* TCP Segmentation 1 */
- case 56 ... 60: /* PCI space */
- case 80 ... 84: /* GMAC 1 */
+ else if (sky2_reg_access_ok(sky2->hw, b))
memcpy_fromio(p, io, 128);
- break;
- default:
-reserved:
+ else
memset(p, 0, 128);
- }
p += 128;
io += 128;
@@ -4466,9 +4520,7 @@ static void __devinit sky2_show_addr(struct net_device *dev)
{
const struct sky2_port *sky2 = netdev_priv(dev);
- if (netif_msg_probe(sky2))
- printk(KERN_INFO PFX "%s: addr %pM\n",
- dev->name, dev->dev_addr);
+ netif_info(sky2, probe, dev, "addr %pM\n", dev->dev_addr);
}
/* Handle software interrupt used during MSI test */
@@ -4774,7 +4826,6 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
-#ifdef CONFIG_PM
static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct sky2_hw *hw = pci_get_drvdata(pdev);
@@ -4799,6 +4850,8 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
wol |= sky2->wol;
}
+ device_set_wakeup_enable(&pdev->dev, wol != 0);
+
sky2_write32(hw, B0_IMSK, 0);
napi_disable(&hw->napi);
sky2_power_aux(hw);
@@ -4811,6 +4864,7 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
+#ifdef CONFIG_PM
static int sky2_resume(struct pci_dev *pdev)
{
struct sky2_hw *hw = pci_get_drvdata(pdev);
@@ -4830,10 +4884,11 @@ static int sky2_resume(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D0, 0);
/* Re-enable all clocks */
- if (hw->chip_id == CHIP_ID_YUKON_EX ||
- hw->chip_id == CHIP_ID_YUKON_EC_U ||
- hw->chip_id == CHIP_ID_YUKON_FE_P)
- sky2_pci_write32(hw, PCI_DEV_REG3, 0);
+ err = pci_write_config_dword(pdev, PCI_DEV_REG3, 0);
+ if (err) {
+ dev_err(&pdev->dev, "PCI write config failed\n");
+ goto out;
+ }
sky2_reset(hw);
sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
@@ -4859,34 +4914,7 @@ out:
static void sky2_shutdown(struct pci_dev *pdev)
{
- struct sky2_hw *hw = pci_get_drvdata(pdev);
- int i, wol = 0;
-
- if (!hw)
- return;
-
- rtnl_lock();
- del_timer_sync(&hw->watchdog_timer);
-
- for (i = 0; i < hw->ports; i++) {
- struct net_device *dev = hw->dev[i];
- struct sky2_port *sky2 = netdev_priv(dev);
-
- if (sky2->wol) {
- wol = 1;
- sky2_wol_init(sky2);
- }
- }
-
- if (wol)
- sky2_power_aux(hw);
- rtnl_unlock();
-
- pci_enable_wake(pdev, PCI_D3hot, wol);
- pci_enable_wake(pdev, PCI_D3cold, wol);
-
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
+ sky2_suspend(pdev, PMSG_SUSPEND);
}
static struct pci_driver sky2_driver = {
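
One of the sky2 changes worth calling out is the new unwind path in sky2_rx_map_skb(): if mapping a page fragment fails, the fragments already mapped are released in reverse order before the linear mapping. The standalone sketch below mirrors that ordering with stub map/unmap functions standing in for the PCI DMA API.

/* Standalone sketch of the map-then-unwind pattern; map_buf/unmap_buf are stubs. */
#include <stdio.h>

#define NR_FRAGS 3
#define BAD_ADDR 0

static unsigned long map_buf(int id)
{
	/* Pretend the last fragment fails to map. */
	return (id == NR_FRAGS) ? BAD_ADDR : 0x1000ul + id;
}

static void unmap_buf(unsigned long addr)
{
	printf("unmap %#lx\n", addr);
}

static int map_rx_buffers(unsigned long *data, unsigned long frags[NR_FRAGS])
{
	int i;

	*data = map_buf(0);
	if (*data == BAD_ADDR)
		return -1;

	for (i = 0; i < NR_FRAGS; i++) {
		frags[i] = map_buf(i + 1);
		if (frags[i] == BAD_ADDR)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)		/* release fragments in reverse */
		unmap_buf(frags[i]);
	unmap_buf(*data);		/* then the linear mapping */
	return -1;
}

int main(void)
{
	unsigned long data, frags[NR_FRAGS];

	if (map_rx_buffers(&data, frags))
		puts("rx mapping error");
	return 0;
}
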
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 365d79c7d834..a5e182dd9819 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -1895,14 +1895,14 @@ enum {
/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */
enum {
- TX_STFW_DIS = 1<<31,/* Disable Store & Forward (Yukon-EC Ultra) */
- TX_STFW_ENA = 1<<30,/* Enable Store & Forward (Yukon-EC Ultra) */
+ TX_STFW_DIS = 1<<31,/* Disable Store & Forward */
+ TX_STFW_ENA = 1<<30,/* Enable Store & Forward */
TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */
TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */
- TX_JUMBO_ENA = 1<<23,/* PCI Jumbo Mode enable (Yukon-EC Ultra) */
- TX_JUMBO_DIS = 1<<22,/* PCI Jumbo Mode enable (Yukon-EC Ultra) */
+ TX_PCI_JUM_ENA = 1<<23,/* PCI Jumbo Mode enable */
+ TX_PCI_JUM_DIS = 1<<22,/* PCI Jumbo Mode enable */
GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */
GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */
@@ -2156,7 +2156,7 @@ struct tx_ring_info {
struct sk_buff *skb;
unsigned long flags;
#define TX_MAP_SINGLE 0x0001
-#define TX_MAP_PAGE 000002
+#define TX_MAP_PAGE 0x0002
DECLARE_PCI_UNMAP_ADDR(mapaddr);
DECLARE_PCI_UNMAP_LEN(maplen);
};
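
The TX_MAP_PAGE hunk replaces an accidental octal constant (000002) with hex. Numerically the two happen to be equal here, but a leading zero silently makes a C constant octal, which bites as soon as other digits are involved; a two-line demo:

/* Leading-zero literals are octal in C; harmless for 2, wrong for 10. */
#include <stdio.h>

#define OCTAL_TEN	000010		/* octal: eight, not ten */
#define HEX_TEN		0x0010		/* sixteen */

int main(void)
{
	printf("000002 == 0x0002 ? %d\n", 000002 == 0x0002);	  /* 1: same value */
	printf("000010 = %d, 0x0010 = %d\n", OCTAL_TEN, HEX_TEN); /* 8 vs 16 */
	return 0;
}
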
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 44ebbaa7457b..ef9674c6713f 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -1323,7 +1323,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
* I don't need to zero the multicast table, because the flag is
* checked before the table is
*/
- else if (dev->flags & IFF_ALLMULTI || dev->mc_count > 16) {
+ else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
DBG(SMC_DEBUG_MISC, "%s: RCR_ALMUL\n", dev->name);
mcr |= MAC_CR_MCPAS_;
}
@@ -1340,7 +1340,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
* the number of the 32 bit register, while the low 5 bits are the bit
* within that register.
*/
- else if (dev->mc_count) {
+ else if (!netdev_mc_empty(dev)) {
int i;
struct dev_mc_list *cur_addr;
@@ -1351,7 +1351,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
memset(multicast_table, 0, sizeof(multicast_table));
cur_addr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
+ for (i = 0; i < netdev_mc_count(dev); i++, cur_addr = cur_addr->next) {
u32 position;
/* do we have a pointer here? */
@@ -2017,10 +2017,8 @@ static int __devinit smc911x_probe(struct net_device *dev)
"set using ifconfig\n", dev->name);
} else {
/* Print the Ethernet address */
- printk("%s: Ethernet addr: ", dev->name);
- for (i = 0; i < 5; i++)
- printk("%2.2x:", dev->dev_addr[i]);
- printk("%2.2x\n", dev->dev_addr[5]);
+ printk("%s: Ethernet addr: %pM\n",
+ dev->name, dev->dev_addr);
}
if (lp->phy_type == 0) {
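
Several hunks in this series (smc911x above, skfddi and skge earlier) replace byte-by-byte address printing with the kernel's %pM format extension; the skfddi change uses %pMF, which, as far as the vsprintf extension goes, selects dash separators for FDDI canonical addresses. The snippet below only mimics that output in userspace and is not the kernel implementation:

/* Userspace mimic of the %pM / %pMF output format. */
#include <stdio.h>

static void print_hw_addr(const unsigned char *addr, char sep)
{
	int i;

	for (i = 0; i < 6; i++)
		printf("%02x%c", addr[i], i == 5 ? '\n' : sep);
}

int main(void)
{
	const unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	print_hw_addr(mac, ':');	/* like printk("%pM", mac)  */
	print_hw_addr(mac, '-');	/* like printk("%pMF", mac) */
	return 0;
}
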
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index 8371b82323ac..41c3dddeab50 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -1542,7 +1542,7 @@ static void smc_set_multicast_list(struct net_device *dev)
/* We just get all multicast packets even if we only want them
. from one source. This will be changed at some future
. point. */
- else if (dev->mc_count ) {
+ else if (!netdev_mc_empty(dev)) {
/* support hardware multicasting */
/* be sure I get rid of flags I might have set */
@@ -1550,7 +1550,7 @@ static void smc_set_multicast_list(struct net_device *dev)
ioaddr + RCR );
/* NOTE: this has to set the bank, so make sure it is the
last thing called. The bank is set to zero at the top */
- smc_setmulticast( ioaddr, dev->mc_count, dev->mc_list );
+ smc_setmulticast(ioaddr, netdev_mc_count(dev), dev->mc_list);
}
else {
outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL),
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index ea4fae79d6ec..66450127c5a1 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1395,7 +1395,7 @@ static void smc_set_multicast_list(struct net_device *dev)
* I don't need to zero the multicast table, because the flag is
* checked before the table is
*/
- else if (dev->flags & IFF_ALLMULTI || dev->mc_count > 16) {
+ else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
DBG(2, "%s: RCR_ALMUL\n", dev->name);
lp->rcr_cur_mode |= RCR_ALMUL;
}
@@ -1412,7 +1412,7 @@ static void smc_set_multicast_list(struct net_device *dev)
* the number of the 8 bit register, while the low 3 bits are the bit
* within that register.
*/
- else if (dev->mc_count) {
+ else if (!netdev_mc_empty(dev)) {
int i;
struct dev_mc_list *cur_addr;
@@ -1423,7 +1423,7 @@ static void smc_set_multicast_list(struct net_device *dev)
memset(multicast_table, 0, sizeof(multicast_table));
cur_addr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
+ for (i = 0; i < netdev_mc_count(dev); i++, cur_addr = cur_addr->next) {
int position;
/* do we have a pointer here? */
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 494cd91ea39c..3c1f9aa84cf5 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1383,7 +1383,7 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_HPFILT_);
pdata->hashhi = 0;
pdata->hashlo = 0;
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
/* Enabling specific multicast addresses */
unsigned int hash_high = 0;
unsigned int hash_low = 0;
@@ -1408,7 +1408,7 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
}
mc_list = mc_list->next;
}
- if (count != (unsigned int)dev->mc_count)
+ if (count != (unsigned int)netdev_mc_count(dev))
SMSC_WARNING(DRV, "mc_count != dev->mc_count");
pdata->hashhi = hash_high;
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index 12f0f5d74e3c..2bd3c986559a 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -80,7 +80,7 @@ struct smsc9420_pdata {
int last_carrier;
};
-static const struct pci_device_id smsc9420_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(smsc9420_id_table) = {
{ PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }
};
@@ -1062,7 +1062,7 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
mac_cr &= (~MAC_CR_PRMS_);
mac_cr |= MAC_CR_MCPAS_;
mac_cr &= (~MAC_CR_HPFILT_);
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *mc_list = dev->mc_list;
u32 hash_lo = 0, hash_hi = 0;
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index 9599ce77ef85..bd8bc66f2e00 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -541,13 +541,15 @@ static void sonic_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
rcr |= SONIC_RCR_PRO;
} else {
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 15)) {
+ if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > 15)) {
rcr |= SONIC_RCR_AMC;
} else {
if (sonic_debug > 2)
- printk("sonic_multicast_list: mc_count %d\n", dev->mc_count);
+ printk("sonic_multicast_list: mc_count %d\n",
+ netdev_mc_count(dev));
sonic_set_cam_enable(dev, 1); /* always enable our own address */
- for (i = 1; i <= dev->mc_count; i++) {
+ for (i = 1; i <= netdev_mc_count(dev); i++) {
addr = dmi->dmi_addr;
dmi = dmi->next;
sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 218524857bfc..16191998ac67 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -72,7 +72,7 @@ MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
char spider_net_driver_name[] = "spidernet";
-static struct pci_device_id spider_net_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(spider_net_pci_tbl) = {
{ PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ 0, }
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index f9521136a869..58bc7ac086cf 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -301,7 +301,7 @@ enum chipset {
CH_6915 = 0,
};
-static struct pci_device_id starfire_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(starfire_pci_tbl) = {
{ 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
{ 0, }
};
@@ -1796,15 +1796,15 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
rx_mode |= AcceptAll;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
- } else if (dev->mc_count <= 14) {
+ } else if (netdev_mc_count(dev) <= 14) {
/* Use the 16 element perfect filter, skip first two entries. */
void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
__be16 *eaddrs;
- for (i = 2, mclist = dev->mc_list; mclist && i < dev->mc_count + 2;
+ for (i = 2, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev) + 2;
i++, mclist = mclist->next) {
eaddrs = (__be16 *)mclist->dmi_addr;
writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
@@ -1825,7 +1825,7 @@ static void set_rx_mode(struct net_device *dev)
__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
/* The chip uses the upper 9 CRC bits
as index into the hash table */
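
The starfire set_rx_mode() conversion keeps the usual receive-filter cascade: promiscuous wins, too many groups or IFF_ALLMULTI falls back to accept-all-multicast, a short list fits the exact-match filter, and everything else goes through the hash filter. A standalone sketch of just that decision (the flag values and the 32-entry limit are mock; the 14 usable exact slots match the comment in the hunk above):

/* Mock flag values; only the decision order mirrors the driver. */
#include <stdio.h>

#define IFF_PROMISC	0x1
#define IFF_ALLMULTI	0x2
#define PERFECT_SLOTS	14	/* 16-entry perfect filter, first two skipped */
#define FILTER_LIMIT	32	/* stand-in for multicast_filter_limit */

static const char *pick_rx_mode(unsigned flags, int mc_count)
{
	if (flags & IFF_PROMISC)
		return "accept all";
	if ((flags & IFF_ALLMULTI) || mc_count > FILTER_LIMIT)
		return "accept all multicast";
	if (mc_count <= PERFECT_SLOTS)
		return "exact-match filter";
	return "hash filter";
}

int main(void)
{
	printf("%s\n", pick_rx_mode(0, 3));		/* exact-match filter */
	printf("%s\n", pick_rx_mode(0, 20));		/* hash filter */
	printf("%s\n", pick_rx_mode(IFF_ALLMULTI, 3));	/* accept all multicast */
	return 0;
}
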
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
index 35eaa5251d7f..fb287649a305 100644
--- a/drivers/net/stmmac/Kconfig
+++ b/drivers/net/stmmac/Kconfig
@@ -4,8 +4,9 @@ config STMMAC_ETH
select PHYLIB
depends on NETDEVICES && CPU_SUBTYPE_ST40
help
- This is the driver for the ST MAC 10/100/1000 on-chip Ethernet
- controllers. ST Ethernet IPs are built around a Synopsys IP Core.
+	  This is the driver for the Ethernet IPs built around a
+	  Synopsys IP Core, fully tested on STMicroelectronics
+	  platforms.
if STMMAC_ETH
@@ -32,7 +33,8 @@ config STMMAC_TIMER
default n
help
Use an external timer for mitigating the number of network
- interrupts.
+ interrupts. Currently, for SH architectures, it is possible
+ to use the TMU channel 2 and the SH-RTC device.
choice
prompt "Select Timer device"
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
index b2d7a5564dfa..c776af15fe1a 100644
--- a/drivers/net/stmmac/Makefile
+++ b/drivers/net/stmmac/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
-stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
- mac100.o gmac.o $(stmmac-y)
+stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
+ dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
+ dwmac100.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index e49e5188e887..2a58172e986a 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -23,132 +23,7 @@
*******************************************************************************/
#include "descs.h"
-#include <linux/io.h>
-
-/* *********************************************
- DMA CRS Control and Status Register Mapping
- * *********************************************/
-#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
-#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
-#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
-#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
-#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
-#define DMA_STATUS 0x00001014 /* Status Register */
-#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
-#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
-#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
-#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
-#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
-
-/* ********************************
- DMA Control register defines
- * ********************************/
-#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
-#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
-
-/* **************************************
- DMA Interrupt Enable register defines
- * **************************************/
-/**** NORMAL INTERRUPT ****/
-#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
-#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
-#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
-#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
-#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
-
-#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
- DMA_INTR_ENA_TIE)
-
-/**** ABNORMAL INTERRUPT ****/
-#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
-#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
-#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
-#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
-#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
-#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
-#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
-#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
-#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
-#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
-
-#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
- DMA_INTR_ENA_UNE)
-
-/* DMA default interrupt mask */
-#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
-
-/* ****************************
- * DMA Status register defines
- * ****************************/
-#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
-#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
-#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int. */
-#define DMA_STATUS_GMI 0x08000000
-#define DMA_STATUS_GLI 0x04000000
-#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
-#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
-#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
-#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
-#define DMA_STATUS_TS_SHIFT 20
-#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
-#define DMA_STATUS_RS_SHIFT 17
-#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
-#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
-#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
-#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
-#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
-#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
-#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
-#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
-#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
-#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
-#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
-#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
-#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
-#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
-#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
-
-/* Other defines */
-#define HASH_TABLE_SIZE 64
-#define PAUSE_TIME 0x200
-
-/* Flow Control defines */
-#define FLOW_OFF 0
-#define FLOW_RX 1
-#define FLOW_TX 2
-#define FLOW_AUTO (FLOW_TX | FLOW_RX)
-
-/* DMA STORE-AND-FORWARD Operation Mode */
-#define SF_DMA_MODE 1
-
-#define HW_CSUM 1
-#define NO_HW_CSUM 0
-
-/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
-#define BUF_SIZE_16KiB 16384
-#define BUF_SIZE_8KiB 8192
-#define BUF_SIZE_4KiB 4096
-#define BUF_SIZE_2KiB 2048
-
-/* Power Down and WOL */
-#define PMT_NOT_SUPPORTED 0
-#define PMT_SUPPORTED 1
-
-/* Common MAC defines */
-#define MAC_CTRL_REG 0x00000000 /* MAC Control */
-#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
-#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
-
-/* MAC Management Counters register */
-#define MMC_CONTROL 0x00000100 /* MMC Control */
-#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
-#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
-#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
-#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
-
-#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
-#define MMC_CONTROL_MAX_FRM_SHIFT 3
-#define MMC_CONTROL_MAX_FRAME 0x7FF
+#include <linux/netdevice.h>
struct stmmac_extra_stats {
/* Transmit errors */
@@ -169,7 +44,7 @@ struct stmmac_extra_stats {
unsigned long rx_toolong;
unsigned long rx_collision;
unsigned long rx_crc;
- unsigned long rx_lenght;
+ unsigned long rx_length;
unsigned long rx_mii;
unsigned long rx_multicast;
unsigned long rx_gmac_overflow;
@@ -198,66 +73,62 @@ struct stmmac_extra_stats {
unsigned long normal_irq_n;
};
-/* GMAC core can compute the checksums in HW. */
-enum rx_frame_status {
+#define HASH_TABLE_SIZE 64
+#define PAUSE_TIME 0x200
+
+/* Flow Control defines */
+#define FLOW_OFF 0
+#define FLOW_RX 1
+#define FLOW_TX 2
+#define FLOW_AUTO (FLOW_TX | FLOW_RX)
+
+#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
+
+#define HW_CSUM 1
+#define NO_HW_CSUM 0
+enum rx_frame_status { /* IPC status */
good_frame = 0,
discard_frame = 1,
csum_none = 2,
};
-static inline void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
- unsigned int high, unsigned int low)
-{
- unsigned long data;
-
- data = (addr[5] << 8) | addr[4];
- writel(data, ioaddr + high);
- data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
- writel(data, ioaddr + low);
+enum tx_dma_irq_status {
+ tx_hard_error = 1,
+ tx_hard_error_bump_tc = 2,
+ handle_tx_rx = 3,
+};
- return;
-}
+/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
+#define BUF_SIZE_16KiB 16384
+#define BUF_SIZE_8KiB 8192
+#define BUF_SIZE_4KiB 4096
+#define BUF_SIZE_2KiB 2048
-static inline void stmmac_get_mac_addr(unsigned long ioaddr,
- unsigned char *addr, unsigned int high,
- unsigned int low)
-{
- unsigned int hi_addr, lo_addr;
+/* Power Down and WOL */
+#define PMT_NOT_SUPPORTED 0
+#define PMT_SUPPORTED 1
- /* Read the MAC address from the hardware */
- hi_addr = readl(ioaddr + high);
- lo_addr = readl(ioaddr + low);
+/* Common MAC defines */
+#define MAC_CTRL_REG 0x00000000 /* MAC Control */
+#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
+#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
- /* Extract the MAC address from the high and low words */
- addr[0] = lo_addr & 0xff;
- addr[1] = (lo_addr >> 8) & 0xff;
- addr[2] = (lo_addr >> 16) & 0xff;
- addr[3] = (lo_addr >> 24) & 0xff;
- addr[4] = hi_addr & 0xff;
- addr[5] = (hi_addr >> 8) & 0xff;
+/* MAC Management Counters register */
+#define MMC_CONTROL 0x00000100 /* MMC Control */
+#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
+#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
+#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
+#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
- return;
-}
+#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
+#define MMC_CONTROL_MAX_FRM_SHIFT 3
+#define MMC_CONTROL_MAX_FRAME 0x7FF
-struct stmmac_ops {
- /* MAC core initialization */
- void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
- /* DMA core initialization */
- int (*dma_init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
- /* Dump MAC registers */
- void (*dump_mac_regs) (unsigned long ioaddr);
- /* Dump DMA registers */
- void (*dump_dma_regs) (unsigned long ioaddr);
- /* Set tx/rx threshold in the csr6 register
- * An invalid value enables the store-and-forward mode */
- void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
- /* To track extra statistic (if supported) */
- void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
- unsigned long ioaddr);
- /* RX descriptor ring initialization */
+struct stmmac_desc_ops {
+ /* DMA RX descriptor ring initialization */
void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
- int disable_rx_ic);
- /* TX descriptor ring initialization */
+ int disable_rx_ic);
+ /* DMA TX descriptor ring initialization */
void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
/* Invoked by the xmit function to prepare the tx descriptor */
@@ -281,7 +152,6 @@ struct stmmac_ops {
/* Get the buffer size from the descriptor */
int (*get_tx_len) (struct dma_desc *p);
/* Handle extra events on specific interrupts hw dependent */
- void (*host_irq_status) (unsigned long ioaddr);
int (*get_rx_owner) (struct dma_desc *p);
void (*set_rx_owner) (struct dma_desc *p);
/* Get the receive frame size */
@@ -289,6 +159,37 @@ struct stmmac_ops {
/* Return the reception status looking at the RDES1 */
int (*rx_status) (void *data, struct stmmac_extra_stats *x,
struct dma_desc *p);
+};
+
+struct stmmac_dma_ops {
+ /* DMA core initialization */
+ int (*init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
+ /* Dump DMA registers */
+ void (*dump_regs) (unsigned long ioaddr);
+ /* Set tx/rx threshold in the csr6 register
+ * An invalid value enables the store-and-forward mode */
+ void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
+ /* To track extra statistic (if supported) */
+ void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
+ unsigned long ioaddr);
+ void (*enable_dma_transmission) (unsigned long ioaddr);
+ void (*enable_dma_irq) (unsigned long ioaddr);
+ void (*disable_dma_irq) (unsigned long ioaddr);
+ void (*start_tx) (unsigned long ioaddr);
+ void (*stop_tx) (unsigned long ioaddr);
+ void (*start_rx) (unsigned long ioaddr);
+ void (*stop_rx) (unsigned long ioaddr);
+ int (*dma_interrupt) (unsigned long ioaddr,
+ struct stmmac_extra_stats *x);
+};
+
+struct stmmac_ops {
+ /* MAC core initialization */
+ void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
+ /* Dump MAC registers */
+ void (*dump_regs) (unsigned long ioaddr);
+ /* Handle extra events on specific interrupts hw dependent */
+ void (*host_irq_status) (unsigned long ioaddr);
/* Multicast filter setting */
void (*set_filter) (struct net_device *dev);
/* Flow control setting */
@@ -298,9 +199,9 @@ struct stmmac_ops {
void (*pmt) (unsigned long ioaddr, unsigned long mode);
/* Set/Get Unicast MAC addresses */
void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n);
+ unsigned int reg_n);
void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n);
+ unsigned int reg_n);
};
struct mac_link {
@@ -314,17 +215,19 @@ struct mii_regs {
unsigned int data; /* MII Data */
};
-struct hw_cap {
- unsigned int version; /* Core Version register (GMAC) */
- unsigned int pmt; /* Power-Down mode (GMAC) */
+struct mac_device_info {
+ struct stmmac_ops *mac;
+ struct stmmac_desc_ops *desc;
+ struct stmmac_dma_ops *dma;
+ unsigned int pmt; /* support Power-Down */
+ struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
- struct mii_regs mii;
};
-struct mac_device_info {
- struct hw_cap hw;
- struct stmmac_ops *ops;
-};
+struct mac_device_info *dwmac1000_setup(unsigned long addr);
+struct mac_device_info *dwmac100_setup(unsigned long addr);
-struct mac_device_info *gmac_setup(unsigned long addr);
-struct mac_device_info *mac100_setup(unsigned long addr);
+extern void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low);
+extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low);
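
The inline address helpers removed above are re-exported from dwmac_lib.c later in this patch. A minimal sketch of the packing they implement, worked out for an illustrative address (the sample value and the helper name below are illustration only, not part of the patch):

/* Not part of the patch: worked example of the high/low register packing
 * performed by stmmac_set_mac_addr()/stmmac_get_mac_addr(). */
static inline void mac_addr_packing_example(void)
{
	u8 addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u32 hi = (addr[5] << 8) | addr[4];		/* 0x00005544 */
	u32 lo = (addr[3] << 24) | (addr[2] << 16) |
		 (addr[1] << 8) | addr[0];		/* 0x33221100 */

	/* set_mac_addr writes hi then lo at the given register offsets;
	 * get_mac_addr reverses exactly the same packing */
	(void)hi;
	(void)lo;
}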
diff --git a/drivers/net/stmmac/descs.h b/drivers/net/stmmac/descs.h
index 6d2a0b2f5e57..63a03e264694 100644
--- a/drivers/net/stmmac/descs.h
+++ b/drivers/net/stmmac/descs.h
@@ -1,6 +1,6 @@
/*******************************************************************************
- Header File to describe the DMA descriptors
- Use enhanced descriptors in case of GMAC Cores.
+ Header File to describe the DMA descriptors.
+ Enhanced descriptors are used in case of DWMAC1000 Cores.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/stmmac/mac100.c b/drivers/net/stmmac/dwmac100.c
index 625171b6062b..576b256ee388 100644
--- a/drivers/net/stmmac/mac100.c
+++ b/drivers/net/stmmac/dwmac100.c
@@ -26,23 +26,23 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <linux/netdevice.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include "common.h"
-#include "mac100.h"
+#include "dwmac100.h"
+#include "dwmac_dma.h"
-#undef MAC100_DEBUG
-/*#define MAC100_DEBUG*/
-#ifdef MAC100_DEBUG
+#undef DWMAC100_DEBUG
+/*#define DWMAC100_DEBUG*/
+#ifdef DWMAC100_DEBUG
#define DBG(fmt, args...) printk(fmt, ## args)
#else
#define DBG(fmt, args...) do { } while (0)
#endif
-static void mac100_core_init(unsigned long ioaddr)
+static void dwmac100_core_init(unsigned long ioaddr)
{
u32 value = readl(ioaddr + MAC_CONTROL);
@@ -54,43 +54,43 @@ static void mac100_core_init(unsigned long ioaddr)
return;
}
-static void mac100_dump_mac_regs(unsigned long ioaddr)
+static void dwmac100_dump_mac_regs(unsigned long ioaddr)
{
pr_info("\t----------------------------------------------\n"
- "\t MAC100 CSR (base addr = 0x%8x)\n"
- "\t----------------------------------------------\n",
- (unsigned int)ioaddr);
+ "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
+ "\t----------------------------------------------\n",
+ (unsigned int)ioaddr);
pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
- readl(ioaddr + MAC_CONTROL));
+ readl(ioaddr + MAC_CONTROL));
pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
- readl(ioaddr + MAC_ADDR_HIGH));
+ readl(ioaddr + MAC_ADDR_HIGH));
pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
- readl(ioaddr + MAC_ADDR_LOW));
+ readl(ioaddr + MAC_ADDR_LOW));
pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
- MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
+ MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
- MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
+ MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
pr_info("\tflow control (offset 0x%x): 0x%08x\n",
MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
- readl(ioaddr + MAC_VLAN1));
+ readl(ioaddr + MAC_VLAN1));
pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
- readl(ioaddr + MAC_VLAN2));
+ readl(ioaddr + MAC_VLAN2));
pr_info("\n\tMAC management counter registers\n");
pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
- MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
+ MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
- MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
+ MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
- MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
+ MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
- MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
+ MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
- MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
+ MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
return;
}
-static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
u32 dma_rx)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
@@ -117,7 +117,7 @@ static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
/* Store and Forward capability is not used at all..
* The transmit threshold can be programmed by
* setting the TTC bits in the DMA control register.*/
-static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode,
+static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
int rxmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -134,11 +134,11 @@ static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode,
return;
}
-static void mac100_dump_dma_regs(unsigned long ioaddr)
+static void dwmac100_dump_dma_regs(unsigned long ioaddr)
{
int i;
- DBG(KERN_DEBUG "MAC100 DMA CSR \n");
+ DBG(KERN_DEBUG "DWMAC 100 DMA CSR \n");
for (i = 0; i < 9; i++)
pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
(DMA_BUS_MODE + i * 4),
@@ -151,8 +151,9 @@ static void mac100_dump_dma_regs(unsigned long ioaddr)
}
/* DMA controller has two counters to track the number of
- the receive missed frames. */
-static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
+ * the receive missed frames. */
+static void dwmac100_dma_diagnostic_fr(void *data,
+ struct stmmac_extra_stats *x,
unsigned long ioaddr)
{
struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -181,7 +182,8 @@ static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
return;
}
-static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
+static int dwmac100_get_tx_frame_status(void *data,
+ struct stmmac_extra_stats *x,
struct dma_desc *p, unsigned long ioaddr)
{
int ret = 0;
@@ -217,7 +219,7 @@ static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static int mac100_get_tx_len(struct dma_desc *p)
+static int dwmac100_get_tx_len(struct dma_desc *p)
{
return p->des01.tx.buffer1_size;
}
@@ -226,14 +228,15 @@ static int mac100_get_tx_len(struct dma_desc *p)
* and, if required, updates the multicast statistics.
* In case of success, it returns csum_none because the device
* is not able to compute the csum in HW. */
-static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
+static int dwmac100_get_rx_frame_status(void *data,
+ struct stmmac_extra_stats *x,
struct dma_desc *p)
{
int ret = csum_none;
struct net_device_stats *stats = (struct net_device_stats *)data;
if (unlikely(p->des01.rx.last_descriptor == 0)) {
- pr_warning("mac100 Error: Oversized Ethernet "
+ pr_warning("dwmac100 Error: Oversized Ethernet "
"frame spanned multiple buffers\n");
stats->rx_length_errors++;
return discard_frame;
@@ -262,7 +265,7 @@ static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
ret = discard_frame;
if (unlikely(p->des01.rx.length_error)) {
- x->rx_lenght++;
+ x->rx_length++;
ret = discard_frame;
}
if (unlikely(p->des01.rx.mii_error)) {
@@ -276,24 +279,24 @@ static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static void mac100_irq_status(unsigned long ioaddr)
+static void dwmac100_irq_status(unsigned long ioaddr)
{
return;
}
-static void mac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
unsigned int reg_n)
{
stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
}
-static void mac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
unsigned int reg_n)
{
stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
}
-static void mac100_set_filter(struct net_device *dev)
+static void dwmac100_set_filter(struct net_device *dev)
{
unsigned long ioaddr = dev->base_addr;
u32 value = readl(ioaddr + MAC_CONTROL);
@@ -302,13 +305,13 @@ static void mac100_set_filter(struct net_device *dev)
value |= MAC_CONTROL_PR;
value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
MAC_CONTROL_HP);
- } else if ((dev->mc_count > HASH_TABLE_SIZE)
+ } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
|| (dev->flags & IFF_ALLMULTI)) {
value |= MAC_CONTROL_PM;
value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
writel(0xffffffff, ioaddr + MAC_HASH_LOW);
- } else if (dev->mc_count == 0) { /* no multicast */
+ } else if (netdev_mc_empty(dev)) { /* no multicast */
value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
MAC_CONTROL_HO | MAC_CONTROL_HP);
} else {
@@ -319,12 +322,12 @@ static void mac100_set_filter(struct net_device *dev)
/* Perfect filter mode for physical address and Hash
filter for multicast */
value |= MAC_CONTROL_HP;
- value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF
- | MAC_CONTROL_HO);
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
+ MAC_CONTROL_IF | MAC_CONTROL_HO);
memset(mc_filter, 0, sizeof(mc_filter));
for (i = 0, mclist = dev->mc_list;
- mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ mclist && i < netdev_mc_count(dev); i++, mclist = mclist->next) {
/* The upper 6 bits of the calculated CRC are used to
* index the contents of the hash table */
int bit_nr =
@@ -347,7 +350,7 @@ static void mac100_set_filter(struct net_device *dev)
return;
}
-static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
unsigned int fc, unsigned int pause_time)
{
unsigned int flow = MAC_FLOW_CTRL_ENABLE;
@@ -359,13 +362,15 @@ static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
return;
}
-/* No PMT module supported in our SoC for the Ethernet Controller. */
-static void mac100_pmt(unsigned long ioaddr, unsigned long mode)
+/* No PMT module supported for this Ethernet Controller.
+ * Tested on ST platforms only.
+ */
+static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
{
return;
}
-static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+static void dwmac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
int disable_rx_ic)
{
int i;
@@ -381,7 +386,7 @@ static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
return;
}
-static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+static void dwmac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
{
int i;
for (i = 0; i < ring_size; i++) {
@@ -393,32 +398,32 @@ static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
return;
}
-static int mac100_get_tx_owner(struct dma_desc *p)
+static int dwmac100_get_tx_owner(struct dma_desc *p)
{
return p->des01.tx.own;
}
-static int mac100_get_rx_owner(struct dma_desc *p)
+static int dwmac100_get_rx_owner(struct dma_desc *p)
{
return p->des01.rx.own;
}
-static void mac100_set_tx_owner(struct dma_desc *p)
+static void dwmac100_set_tx_owner(struct dma_desc *p)
{
p->des01.tx.own = 1;
}
-static void mac100_set_rx_owner(struct dma_desc *p)
+static void dwmac100_set_rx_owner(struct dma_desc *p)
{
p->des01.rx.own = 1;
}
-static int mac100_get_tx_ls(struct dma_desc *p)
+static int dwmac100_get_tx_ls(struct dma_desc *p)
{
return p->des01.tx.last_segment;
}
-static void mac100_release_tx_desc(struct dma_desc *p)
+static void dwmac100_release_tx_desc(struct dma_desc *p)
{
int ter = p->des01.tx.end_ring;
@@ -444,74 +449,91 @@ static void mac100_release_tx_desc(struct dma_desc *p)
return;
}
-static void mac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+static void dwmac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
int csum_flag)
{
p->des01.tx.first_segment = is_fs;
p->des01.tx.buffer1_size = len;
}
-static void mac100_clear_tx_ic(struct dma_desc *p)
+static void dwmac100_clear_tx_ic(struct dma_desc *p)
{
p->des01.tx.interrupt = 0;
}
-static void mac100_close_tx_desc(struct dma_desc *p)
+static void dwmac100_close_tx_desc(struct dma_desc *p)
{
p->des01.tx.last_segment = 1;
p->des01.tx.interrupt = 1;
}
-static int mac100_get_rx_frame_len(struct dma_desc *p)
+static int dwmac100_get_rx_frame_len(struct dma_desc *p)
{
return p->des01.rx.frame_length;
}
-struct stmmac_ops mac100_driver = {
- .core_init = mac100_core_init,
- .dump_mac_regs = mac100_dump_mac_regs,
- .dma_init = mac100_dma_init,
- .dump_dma_regs = mac100_dump_dma_regs,
- .dma_mode = mac100_dma_operation_mode,
- .dma_diagnostic_fr = mac100_dma_diagnostic_fr,
- .tx_status = mac100_get_tx_frame_status,
- .rx_status = mac100_get_rx_frame_status,
- .get_tx_len = mac100_get_tx_len,
- .set_filter = mac100_set_filter,
- .flow_ctrl = mac100_flow_ctrl,
- .pmt = mac100_pmt,
- .init_rx_desc = mac100_init_rx_desc,
- .init_tx_desc = mac100_init_tx_desc,
- .get_tx_owner = mac100_get_tx_owner,
- .get_rx_owner = mac100_get_rx_owner,
- .release_tx_desc = mac100_release_tx_desc,
- .prepare_tx_desc = mac100_prepare_tx_desc,
- .clear_tx_ic = mac100_clear_tx_ic,
- .close_tx_desc = mac100_close_tx_desc,
- .get_tx_ls = mac100_get_tx_ls,
- .set_tx_owner = mac100_set_tx_owner,
- .set_rx_owner = mac100_set_rx_owner,
- .get_rx_frame_len = mac100_get_rx_frame_len,
- .host_irq_status = mac100_irq_status,
- .set_umac_addr = mac100_set_umac_addr,
- .get_umac_addr = mac100_get_umac_addr,
+struct stmmac_ops dwmac100_ops = {
+ .core_init = dwmac100_core_init,
+ .dump_regs = dwmac100_dump_mac_regs,
+ .host_irq_status = dwmac100_irq_status,
+ .set_filter = dwmac100_set_filter,
+ .flow_ctrl = dwmac100_flow_ctrl,
+ .pmt = dwmac100_pmt,
+ .set_umac_addr = dwmac100_set_umac_addr,
+ .get_umac_addr = dwmac100_get_umac_addr,
};
-struct mac_device_info *mac100_setup(unsigned long ioaddr)
+struct stmmac_dma_ops dwmac100_dma_ops = {
+ .init = dwmac100_dma_init,
+ .dump_regs = dwmac100_dump_dma_regs,
+ .dma_mode = dwmac100_dma_operation_mode,
+ .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
+};
+
+struct stmmac_desc_ops dwmac100_desc_ops = {
+ .tx_status = dwmac100_get_tx_frame_status,
+ .rx_status = dwmac100_get_rx_frame_status,
+ .get_tx_len = dwmac100_get_tx_len,
+ .init_rx_desc = dwmac100_init_rx_desc,
+ .init_tx_desc = dwmac100_init_tx_desc,
+ .get_tx_owner = dwmac100_get_tx_owner,
+ .get_rx_owner = dwmac100_get_rx_owner,
+ .release_tx_desc = dwmac100_release_tx_desc,
+ .prepare_tx_desc = dwmac100_prepare_tx_desc,
+ .clear_tx_ic = dwmac100_clear_tx_ic,
+ .close_tx_desc = dwmac100_close_tx_desc,
+ .get_tx_ls = dwmac100_get_tx_ls,
+ .set_tx_owner = dwmac100_set_tx_owner,
+ .set_rx_owner = dwmac100_set_rx_owner,
+ .get_rx_frame_len = dwmac100_get_rx_frame_len,
+};
+
+struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
{
struct mac_device_info *mac;
mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
- pr_info("\tMAC 10/100\n");
+ pr_info("\tDWMAC100\n");
+
+ mac->mac = &dwmac100_ops;
+ mac->desc = &dwmac100_desc_ops;
+ mac->dma = &dwmac100_dma_ops;
- mac->ops = &mac100_driver;
- mac->hw.pmt = PMT_NOT_SUPPORTED;
- mac->hw.link.port = MAC_CONTROL_PS;
- mac->hw.link.duplex = MAC_CONTROL_F;
- mac->hw.link.speed = 0;
- mac->hw.mii.addr = MAC_MII_ADDR;
- mac->hw.mii.data = MAC_MII_DATA;
+ mac->pmt = PMT_NOT_SUPPORTED;
+ mac->link.port = MAC_CONTROL_PS;
+ mac->link.duplex = MAC_CONTROL_F;
+ mac->link.speed = 0;
+ mac->mii.addr = MAC_MII_ADDR;
+ mac->mii.data = MAC_MII_DATA;
return mac;
}
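
With the ops split into separate mac/dma/desc tables, a caller now reaches each block through its own pointer. A minimal sketch of the intended usage, assuming a hypothetical attach path (the pbl value and ring addresses below are placeholders, not taken from this patch):

/* Not part of the patch: hypothetical consumer of the split ops. */
static int dwmac100_attach_example(unsigned long ioaddr)
{
	struct mac_device_info *hw = dwmac100_setup(ioaddr);
	u32 dma_tx_phy = 0, dma_rx_phy = 0;	/* descriptor ring bus addresses */

	hw->mac->core_init(ioaddr);
	if (hw->dma->init(ioaddr, 8 /* pbl */, dma_tx_phy, dma_rx_phy))
		return -EBUSY;
	/* descriptor handling goes through hw->desc, e.g.
	 * hw->desc->init_rx_desc() and hw->desc->init_tx_desc() */
	return 0;
}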
diff --git a/drivers/net/stmmac/mac100.h b/drivers/net/stmmac/dwmac100.h
index 0f8f110d004a..0f8f110d004a 100644
--- a/drivers/net/stmmac/mac100.h
+++ b/drivers/net/stmmac/dwmac100.h
diff --git a/drivers/net/stmmac/gmac.h b/drivers/net/stmmac/dwmac1000.h
index 2e82d6c9a148..62dca0e384e7 100644
--- a/drivers/net/stmmac/gmac.h
+++ b/drivers/net/stmmac/dwmac1000.h
@@ -20,6 +20,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <linux/phy.h>
+#include "common.h"
+
#define GMAC_CONTROL 0x00000000 /* Configuration */
#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */
#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
@@ -32,7 +35,7 @@
#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
-enum gmac_irq_status {
+enum dwmac1000_irq_status {
time_stamp_irq = 0x0200,
mmc_rx_csum_offload_irq = 0x0080,
mmc_tx_irq = 0x0040,
@@ -202,3 +205,16 @@ enum rtc_control {
#define GMAC_MMC_RX_INTR 0x104
#define GMAC_MMC_TX_INTR 0x108
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
+
+#undef DWMAC1000_DEBUG
+/* #define DWMAC1000_DEBUG */
+#undef FRAME_FILTER_DEBUG
+/* #define FRAME_FILTER_DEBUG */
+#ifdef DWMAC1000_DEBUG
+#define DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define DBG(fmt, args...) do { } while (0)
+#endif
+
+extern struct stmmac_dma_ops dwmac1000_dma_ops;
+extern struct stmmac_desc_ops dwmac1000_desc_ops;
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
new file mode 100644
index 000000000000..90dbb4f41ef3
--- /dev/null
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -0,0 +1,245 @@
+/*******************************************************************************
+ This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
+ developing this code.
+
+ This only implements the mac core functions for this chip.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/crc32.h>
+#include "dwmac1000.h"
+
+static void dwmac1000_core_init(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + GMAC_CONTROL);
+ value |= GMAC_CORE_INIT;
+ writel(value, ioaddr + GMAC_CONTROL);
+
+ /* STBus Bridge Configuration */
+ /*writel(0xc5608, ioaddr + 0x00007000);*/
+
+ /* Freeze MMC counters */
+ writel(0x8, ioaddr + GMAC_MMC_CTRL);
+ /* Mask GMAC interrupts */
+ writel(0x207, ioaddr + GMAC_INT_MASK);
+
+#ifdef STMMAC_VLAN_TAG_USED
+ /* Tag detection without filtering */
+ writel(0x0, ioaddr + GMAC_VLAN_TAG);
+#endif
+ return;
+}
+
+static void dwmac1000_dump_regs(unsigned long ioaddr)
+{
+ int i;
+ pr_info("\tDWMAC1000 regs (base addr = 0x%8x)\n", (unsigned int)ioaddr);
+
+ for (i = 0; i < 55; i++) {
+ int offset = i * 4;
+ pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
+ offset, readl(ioaddr + offset));
+ }
+ return;
+}
+
+static void dwmac1000_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac1000_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac1000_set_filter(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int value = 0;
+
+ DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
+ __func__, netdev_mc_count(dev), netdev_uc_count(dev));
+
+ if (dev->flags & IFF_PROMISC)
+ value = GMAC_FRAME_FILTER_PR;
+ else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
+ || (dev->flags & IFF_ALLMULTI)) {
+ value = GMAC_FRAME_FILTER_PM; /* pass all multi */
+ writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
+ writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
+ } else if (!netdev_mc_empty(dev)) {
+ int i;
+ u32 mc_filter[2];
+ struct dev_mc_list *mclist;
+
+ /* Hash filter for multicast */
+ value = GMAC_FRAME_FILTER_HMC;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < netdev_mc_count(dev); i++, mclist = mclist->next) {
+ /* The upper 6 bits of the calculated CRC are used to
+ index the contents of the hash table */
+ int bit_nr =
+ bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
+ writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
+ }
+
+ /* Handle multiple unicast addresses (perfect filtering)*/
+ if (netdev_uc_count(dev) > GMAC_MAX_UNICAST_ADDRESSES)
+ /* Switch to promiscuous mode if more than 16 addrs
+ are required */
+ value |= GMAC_FRAME_FILTER_PR;
+ else {
+ int reg = 1;
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ dwmac1000_set_umac_addr(ioaddr, ha->addr, reg);
+ reg++;
+ }
+ }
+
+#ifdef FRAME_FILTER_DEBUG
+ /* Enable Receive all mode (to debug filtering_fail errors) */
+ value |= GMAC_FRAME_FILTER_RA;
+#endif
+ writel(value, ioaddr + GMAC_FRAME_FILTER);
+
+ DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
+ "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
+ readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
+
+ return;
+}
+
+static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time)
+{
+ unsigned int flow = 0;
+
+ DBG(KERN_DEBUG "GMAC Flow-Control:\n");
+ if (fc & FLOW_RX) {
+ DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
+ flow |= GMAC_FLOW_CTRL_RFE;
+ }
+ if (fc & FLOW_TX) {
+ DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
+ flow |= GMAC_FLOW_CTRL_TFE;
+ }
+
+ if (duplex) {
+ DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
+ flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
+ }
+
+ writel(flow, ioaddr + GMAC_FLOW_CTRL);
+ return;
+}
+
+static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
+{
+ unsigned int pmt = 0;
+
+ if (mode == WAKE_MAGIC) {
+ DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
+ pmt |= power_down | magic_pkt_en;
+ } else if (mode == WAKE_UCAST) {
+ DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
+ pmt |= global_unicast;
+ }
+
+ writel(pmt, ioaddr + GMAC_PMT);
+ return;
+}
+
+
+static void dwmac1000_irq_status(unsigned long ioaddr)
+{
+ u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+
+ /* Not used events (e.g. MMC interrupts) are not handled. */
+ if ((intr_status & mmc_tx_irq))
+ DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_TX_INTR));
+ if (unlikely(intr_status & mmc_rx_irq))
+ DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_RX_INTR));
+ if (unlikely(intr_status & mmc_rx_csum_offload_irq))
+ DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
+ if (unlikely(intr_status & pmt_irq)) {
+ DBG(KERN_DEBUG "GMAC: received Magic frame\n");
+ /* clear the PMT bits 5 and 6 by reading the PMT
+ * status register. */
+ readl(ioaddr + GMAC_PMT);
+ }
+
+ return;
+}
+
+struct stmmac_ops dwmac1000_ops = {
+ .core_init = dwmac1000_core_init,
+ .dump_regs = dwmac1000_dump_regs,
+ .host_irq_status = dwmac1000_irq_status,
+ .set_filter = dwmac1000_set_filter,
+ .flow_ctrl = dwmac1000_flow_ctrl,
+ .pmt = dwmac1000_pmt,
+ .set_umac_addr = dwmac1000_set_umac_addr,
+ .get_umac_addr = dwmac1000_get_umac_addr,
+};
+
+struct mac_device_info *dwmac1000_setup(unsigned long ioaddr)
+{
+ struct mac_device_info *mac;
+ u32 uid = readl(ioaddr + GMAC_VERSION);
+
+ pr_info("\tDWMAC1000 - user ID: 0x%x, Synopsys ID: 0x%x\n",
+ ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
+
+ mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+
+ mac->mac = &dwmac1000_ops;
+ mac->desc = &dwmac1000_desc_ops;
+ mac->dma = &dwmac1000_dma_ops;
+
+ mac->pmt = PMT_SUPPORTED;
+ mac->link.port = GMAC_CONTROL_PS;
+ mac->link.duplex = GMAC_CONTROL_DM;
+ mac->link.speed = GMAC_CONTROL_FES;
+ mac->mii.addr = GMAC_MII_ADDR;
+ mac->mii.data = GMAC_MII_DATA;
+
+ return mac;
+}
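
The multicast handling in dwmac1000_set_filter() above hides the hash-bin selection inside the address loop; pulled out on its own it looks like this (sketch only, the helper name is not part of the patch):

/* Not part of the patch: the hash-bin computation used by the filter above. */
static inline void hash_bit_example(const u8 *mc_addr, u32 mc_filter[2])
{
	/* upper 6 bits of the bit-reversed CRC32 pick one of 64 bins */
	int bit_nr = bitrev32(~crc32_le(~0, mc_addr, 6)) >> 26;

	/* bit 5 selects the HIGH/LOW register, bits 4..0 the bit within it */
	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}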
diff --git a/drivers/net/stmmac/gmac.c b/drivers/net/stmmac/dwmac1000_dma.c
index 52586ee68953..39d436a2da68 100644
--- a/drivers/net/stmmac/gmac.c
+++ b/drivers/net/stmmac/dwmac1000_dma.c
@@ -3,6 +3,8 @@
DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
developing this code.
+ This contains the functions to handle the dma and descriptors.
+
Copyright (C) 2007-2009 STMicroelectronics Ltd
This program is free software; you can redistribute it and/or modify it
@@ -24,41 +26,11 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <linux/netdevice.h>
-#include <linux/crc32.h>
-#include <linux/mii.h>
-#include <linux/phy.h>
-
-#include "stmmac.h"
-#include "gmac.h"
-
-#undef GMAC_DEBUG
-/*#define GMAC_DEBUG*/
-#undef FRAME_FILTER_DEBUG
-/*#define FRAME_FILTER_DEBUG*/
-#ifdef GMAC_DEBUG
-#define DBG(fmt, args...) printk(fmt, ## args)
-#else
-#define DBG(fmt, args...) do { } while (0)
-#endif
+#include "dwmac1000.h"
+#include "dwmac_dma.h"
-static void gmac_dump_regs(unsigned long ioaddr)
-{
- int i;
- pr_info("\t----------------------------------------------\n"
- "\t GMAC registers (base addr = 0x%8x)\n"
- "\t----------------------------------------------\n",
- (unsigned int)ioaddr);
-
- for (i = 0; i < 55; i++) {
- int offset = i * 4;
- pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
- offset, readl(ioaddr + offset));
- }
- return;
-}
-
-static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
+static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+ u32 dma_rx)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
/* DMA SW reset */
@@ -87,7 +59,7 @@ static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
}
/* Transmit FIFO flush operation */
-static void gmac_flush_tx_fifo(unsigned long ioaddr)
+static void dwmac1000_flush_tx_fifo(unsigned long ioaddr)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
@@ -95,7 +67,7 @@ static void gmac_flush_tx_fifo(unsigned long ioaddr)
do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
}
-static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
+static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
int rxmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -148,13 +120,13 @@ static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
}
/* Not yet implemented --- no RMON module */
-static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
- unsigned long ioaddr)
+static void dwmac1000_dma_diagnostic_fr(void *data,
+ struct stmmac_extra_stats *x, unsigned long ioaddr)
{
return;
}
-static void gmac_dump_dma_regs(unsigned long ioaddr)
+static void dwmac1000_dump_dma_regs(unsigned long ioaddr)
{
int i;
pr_info(" DMA registers\n");
@@ -169,8 +141,9 @@ static void gmac_dump_dma_regs(unsigned long ioaddr)
return;
}
-static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p, unsigned long ioaddr)
+static int dwmac1000_get_tx_frame_status(void *data,
+ struct stmmac_extra_stats *x,
+ struct dma_desc *p, unsigned long ioaddr)
{
int ret = 0;
struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -185,7 +158,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(p->des01.etx.frame_flushed)) {
DBG(KERN_ERR "\tframe_flushed error\n");
x->tx_frame_flushed++;
- gmac_flush_tx_fifo(ioaddr);
+ dwmac1000_flush_tx_fifo(ioaddr);
}
if (unlikely(p->des01.etx.loss_carrier)) {
@@ -213,7 +186,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(p->des01.etx.underflow_error)) {
DBG(KERN_ERR "\tunderflow error\n");
- gmac_flush_tx_fifo(ioaddr);
+ dwmac1000_flush_tx_fifo(ioaddr);
x->tx_underflow++;
}
@@ -225,7 +198,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(p->des01.etx.payload_error)) {
DBG(KERN_ERR "\tAddr/Payload csum error\n");
x->tx_payload_error++;
- gmac_flush_tx_fifo(ioaddr);
+ dwmac1000_flush_tx_fifo(ioaddr);
}
ret = -1;
@@ -245,19 +218,19 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static int gmac_get_tx_len(struct dma_desc *p)
+static int dwmac1000_get_tx_len(struct dma_desc *p)
{
return p->des01.etx.buffer1_size;
}
-static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
+static int dwmac1000_coe_rdes0(int ipc_err, int type, int payload_err)
{
int ret = good_frame;
u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
/* bits 5 7 0 | Frame status
* ----------------------------------------------------------
- * 0 0 0 | IEEE 802.3 Type frame (lenght < 1536 octects)
+ * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
* 1 0 0 | IPv4/6 No CSUM errorS.
* 1 0 1 | IPv4/6 CSUM PAYLOAD error
* 1 1 0 | IPv4/6 CSUM IP HR error
@@ -293,8 +266,8 @@ static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
return ret;
}
-static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p)
+static int dwmac1000_get_rx_frame_status(void *data,
+ struct stmmac_extra_stats *x, struct dma_desc *p)
{
int ret = good_frame;
struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -339,7 +312,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
* It doesn't match with the information reported into the databook.
* At any rate, we need to understand if the CSUM hw computation is ok
* and report this info to the upper layers. */
- ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error,
+ ret = dwmac1000_coe_rdes0(p->des01.erx.ipc_csum_error,
p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
if (unlikely(p->des01.erx.dribbling)) {
@@ -358,7 +331,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
}
if (unlikely(p->des01.erx.length_error)) {
DBG(KERN_ERR "GMAC RX: length_error error\n");
- x->rx_lenght++;
+ x->rx_length++;
ret = discard_frame;
}
#ifdef STMMAC_VLAN_TAG_USED
@@ -370,181 +343,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static void gmac_irq_status(unsigned long ioaddr)
-{
- u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
-
- /* Not used events (e.g. MMC interrupts) are not handled. */
- if ((intr_status & mmc_tx_irq))
- DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
- readl(ioaddr + GMAC_MMC_TX_INTR));
- if (unlikely(intr_status & mmc_rx_irq))
- DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
- readl(ioaddr + GMAC_MMC_RX_INTR));
- if (unlikely(intr_status & mmc_rx_csum_offload_irq))
- DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
- readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
- if (unlikely(intr_status & pmt_irq)) {
- DBG(KERN_DEBUG "GMAC: received Magic frame\n");
- /* clear the PMT bits 5 and 6 by reading the PMT
- * status register. */
- readl(ioaddr + GMAC_PMT);
- }
-
- return;
-}
-
-static void gmac_core_init(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + GMAC_CONTROL);
- value |= GMAC_CORE_INIT;
- writel(value, ioaddr + GMAC_CONTROL);
-
- /* STBus Bridge Configuration */
- /*writel(0xc5608, ioaddr + 0x00007000);*/
-
- /* Freeze MMC counters */
- writel(0x8, ioaddr + GMAC_MMC_CTRL);
- /* Mask GMAC interrupts */
- writel(0x207, ioaddr + GMAC_INT_MASK);
-
-#ifdef STMMAC_VLAN_TAG_USED
- /* Tag detection without filtering */
- writel(0x0, ioaddr + GMAC_VLAN_TAG);
-#endif
- return;
-}
-
-static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n)
-{
- stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
- GMAC_ADDR_LOW(reg_n));
-}
-
-static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n)
-{
- stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
- GMAC_ADDR_LOW(reg_n));
-}
-
-static void gmac_set_filter(struct net_device *dev)
-{
- unsigned long ioaddr = dev->base_addr;
- unsigned int value = 0;
-
- DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
- __func__, dev->mc_count, dev->uc_count);
-
- if (dev->flags & IFF_PROMISC)
- value = GMAC_FRAME_FILTER_PR;
- else if ((dev->mc_count > HASH_TABLE_SIZE)
- || (dev->flags & IFF_ALLMULTI)) {
- value = GMAC_FRAME_FILTER_PM; /* pass all multi */
- writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
- writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
- } else if (dev->mc_count > 0) {
- int i;
- u32 mc_filter[2];
- struct dev_mc_list *mclist;
-
- /* Hash filter for multicast */
- value = GMAC_FRAME_FILTER_HMC;
-
- memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list;
- mclist && i < dev->mc_count; i++, mclist = mclist->next) {
- /* The upper 6 bits of the calculated CRC are used to
- index the contens of the hash table */
- int bit_nr =
- bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
- /* The most significant bit determines the register to
- * use (H/L) while the other 5 bits determine the bit
- * within the register. */
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- }
- writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
- writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
- }
-
- /* Handle multiple unicast addresses (perfect filtering)*/
- if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES)
- /* Switch to promiscuous mode is more than 16 addrs
- are required */
- value |= GMAC_FRAME_FILTER_PR;
- else {
- int i;
- struct dev_addr_list *uc_ptr = dev->uc_list;
-
- for (i = 0; i < dev->uc_count; i++) {
- gmac_set_umac_addr(ioaddr, uc_ptr->da_addr,
- i + 1);
-
- DBG(KERN_INFO "\t%d "
- "- Unicast addr %02x:%02x:%02x:%02x:%02x:"
- "%02x\n", i + 1,
- uc_ptr->da_addr[0], uc_ptr->da_addr[1],
- uc_ptr->da_addr[2], uc_ptr->da_addr[3],
- uc_ptr->da_addr[4], uc_ptr->da_addr[5]);
- uc_ptr = uc_ptr->next;
- }
- }
-
-#ifdef FRAME_FILTER_DEBUG
- /* Enable Receive all mode (to debug filtering_fail errors) */
- value |= GMAC_FRAME_FILTER_RA;
-#endif
- writel(value, ioaddr + GMAC_FRAME_FILTER);
-
- DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
- "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
- readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
-
- return;
-}
-
-static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
- unsigned int fc, unsigned int pause_time)
-{
- unsigned int flow = 0;
-
- DBG(KERN_DEBUG "GMAC Flow-Control:\n");
- if (fc & FLOW_RX) {
- DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
- flow |= GMAC_FLOW_CTRL_RFE;
- }
- if (fc & FLOW_TX) {
- DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
- flow |= GMAC_FLOW_CTRL_TFE;
- }
-
- if (duplex) {
- DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
- flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
- }
-
- writel(flow, ioaddr + GMAC_FLOW_CTRL);
- return;
-}
-
-static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
-{
- unsigned int pmt = 0;
-
- if (mode == WAKE_MAGIC) {
- DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
- pmt |= power_down | magic_pkt_en;
- } else if (mode == WAKE_UCAST) {
- DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
- pmt |= global_unicast;
- }
-
- writel(pmt, ioaddr + GMAC_PMT);
- return;
-}
-
-static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+static void dwmac1000_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
int disable_rx_ic)
{
int i;
@@ -562,7 +361,7 @@ static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
return;
}
-static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+static void dwmac1000_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
{
int i;
@@ -576,32 +375,32 @@ static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
return;
}
-static int gmac_get_tx_owner(struct dma_desc *p)
+static int dwmac1000_get_tx_owner(struct dma_desc *p)
{
return p->des01.etx.own;
}
-static int gmac_get_rx_owner(struct dma_desc *p)
+static int dwmac1000_get_rx_owner(struct dma_desc *p)
{
return p->des01.erx.own;
}
-static void gmac_set_tx_owner(struct dma_desc *p)
+static void dwmac1000_set_tx_owner(struct dma_desc *p)
{
p->des01.etx.own = 1;
}
-static void gmac_set_rx_owner(struct dma_desc *p)
+static void dwmac1000_set_rx_owner(struct dma_desc *p)
{
p->des01.erx.own = 1;
}
-static int gmac_get_tx_ls(struct dma_desc *p)
+static int dwmac1000_get_tx_ls(struct dma_desc *p)
{
return p->des01.etx.last_segment;
}
-static void gmac_release_tx_desc(struct dma_desc *p)
+static void dwmac1000_release_tx_desc(struct dma_desc *p)
{
int ter = p->des01.etx.end_ring;
@@ -611,7 +410,7 @@ static void gmac_release_tx_desc(struct dma_desc *p)
return;
}
-static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+static void dwmac1000_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
int csum_flag)
{
p->des01.etx.first_segment = is_fs;
@@ -625,69 +424,51 @@ static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
p->des01.etx.checksum_insertion = cic_full;
}
-static void gmac_clear_tx_ic(struct dma_desc *p)
+static void dwmac1000_clear_tx_ic(struct dma_desc *p)
{
p->des01.etx.interrupt = 0;
}
-static void gmac_close_tx_desc(struct dma_desc *p)
+static void dwmac1000_close_tx_desc(struct dma_desc *p)
{
p->des01.etx.last_segment = 1;
p->des01.etx.interrupt = 1;
}
-static int gmac_get_rx_frame_len(struct dma_desc *p)
+static int dwmac1000_get_rx_frame_len(struct dma_desc *p)
{
return p->des01.erx.frame_length;
}
-struct stmmac_ops gmac_driver = {
- .core_init = gmac_core_init,
- .dump_mac_regs = gmac_dump_regs,
- .dma_init = gmac_dma_init,
- .dump_dma_regs = gmac_dump_dma_regs,
- .dma_mode = gmac_dma_operation_mode,
- .dma_diagnostic_fr = gmac_dma_diagnostic_fr,
- .tx_status = gmac_get_tx_frame_status,
- .rx_status = gmac_get_rx_frame_status,
- .get_tx_len = gmac_get_tx_len,
- .set_filter = gmac_set_filter,
- .flow_ctrl = gmac_flow_ctrl,
- .pmt = gmac_pmt,
- .init_rx_desc = gmac_init_rx_desc,
- .init_tx_desc = gmac_init_tx_desc,
- .get_tx_owner = gmac_get_tx_owner,
- .get_rx_owner = gmac_get_rx_owner,
- .release_tx_desc = gmac_release_tx_desc,
- .prepare_tx_desc = gmac_prepare_tx_desc,
- .clear_tx_ic = gmac_clear_tx_ic,
- .close_tx_desc = gmac_close_tx_desc,
- .get_tx_ls = gmac_get_tx_ls,
- .set_tx_owner = gmac_set_tx_owner,
- .set_rx_owner = gmac_set_rx_owner,
- .get_rx_frame_len = gmac_get_rx_frame_len,
- .host_irq_status = gmac_irq_status,
- .set_umac_addr = gmac_set_umac_addr,
- .get_umac_addr = gmac_get_umac_addr,
+struct stmmac_dma_ops dwmac1000_dma_ops = {
+ .init = dwmac1000_dma_init,
+ .dump_regs = dwmac1000_dump_dma_regs,
+ .dma_mode = dwmac1000_dma_operation_mode,
+ .dma_diagnostic_fr = dwmac1000_dma_diagnostic_fr,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
};
-struct mac_device_info *gmac_setup(unsigned long ioaddr)
-{
- struct mac_device_info *mac;
- u32 uid = readl(ioaddr + GMAC_VERSION);
-
- pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
- ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
-
- mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
-
- mac->ops = &gmac_driver;
- mac->hw.pmt = PMT_SUPPORTED;
- mac->hw.link.port = GMAC_CONTROL_PS;
- mac->hw.link.duplex = GMAC_CONTROL_DM;
- mac->hw.link.speed = GMAC_CONTROL_FES;
- mac->hw.mii.addr = GMAC_MII_ADDR;
- mac->hw.mii.data = GMAC_MII_DATA;
-
- return mac;
-}
+struct stmmac_desc_ops dwmac1000_desc_ops = {
+ .tx_status = dwmac1000_get_tx_frame_status,
+ .rx_status = dwmac1000_get_rx_frame_status,
+ .get_tx_len = dwmac1000_get_tx_len,
+ .init_rx_desc = dwmac1000_init_rx_desc,
+ .init_tx_desc = dwmac1000_init_tx_desc,
+ .get_tx_owner = dwmac1000_get_tx_owner,
+ .get_rx_owner = dwmac1000_get_rx_owner,
+ .release_tx_desc = dwmac1000_release_tx_desc,
+ .prepare_tx_desc = dwmac1000_prepare_tx_desc,
+ .clear_tx_ic = dwmac1000_clear_tx_ic,
+ .close_tx_desc = dwmac1000_close_tx_desc,
+ .get_tx_ls = dwmac1000_get_tx_ls,
+ .set_tx_owner = dwmac1000_set_tx_owner,
+ .set_rx_owner = dwmac1000_set_rx_owner,
+ .get_rx_frame_len = dwmac1000_get_rx_frame_len,
+};
diff --git a/drivers/net/stmmac/dwmac_dma.h b/drivers/net/stmmac/dwmac_dma.h
new file mode 100644
index 000000000000..de848d9f6060
--- /dev/null
+++ b/drivers/net/stmmac/dwmac_dma.h
@@ -0,0 +1,107 @@
+/*******************************************************************************
+ DWMAC DMA Header file.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+/* DMA CRS Control and Status Register Mapping */
+#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
+#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
+#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
+#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
+#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
+#define DMA_STATUS 0x00001014 /* Status Register */
+#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
+#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
+#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
+#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
+#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
+
+/* DMA Control register defines */
+#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
+#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
+
+/* DMA Normal interrupt */
+#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
+#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
+#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
+#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
+
+#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+ DMA_INTR_ENA_TIE)
+
+/* DMA Abnormal interrupt */
+#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
+#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
+#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
+#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
+#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
+#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
+#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
+#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
+#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
+
+#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+ DMA_INTR_ENA_UNE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+
+/* DMA Status register defines */
+#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
+#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
+#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
+#define DMA_STATUS_GMI 0x08000000
+#define DMA_STATUS_GLI 0x04000000
+#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
+#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
+#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
+#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
+#define DMA_STATUS_TS_SHIFT 20
+#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
+#define DMA_STATUS_RS_SHIFT 17
+#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
+#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
+#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
+#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
+#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
+#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
+#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
+#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+
+extern void dwmac_enable_dma_transmission(unsigned long ioaddr);
+extern void dwmac_enable_dma_irq(unsigned long ioaddr);
+extern void dwmac_disable_dma_irq(unsigned long ioaddr);
+extern void dwmac_dma_start_tx(unsigned long ioaddr);
+extern void dwmac_dma_stop_tx(unsigned long ioaddr);
+extern void dwmac_dma_start_rx(unsigned long ioaddr);
+extern void dwmac_dma_stop_rx(unsigned long ioaddr);
+extern int dwmac_dma_interrupt(unsigned long ioaddr,
+ struct stmmac_extra_stats *x);
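
For reference, the default mask programmed into DMA_INTR_ENA by dwmac_enable_dma_irq() works out numerically as follows (values computed from the defines above):

/*
 * DMA_INTR_NORMAL   = NIE | RIE | TIE = 0x00010041
 * DMA_INTR_ABNORMAL = AIE | FBE | UNE = 0x0000a020
 * DMA_INTR_DEFAULT_MASK               = 0x0001a061
 */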
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
new file mode 100644
index 000000000000..d4adb1eaa447
--- /dev/null
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -0,0 +1,263 @@
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/io.h>
+#include "common.h"
+#include "dwmac_dma.h"
+
+#undef DWMAC_DMA_DEBUG
+#ifdef DWMAC_DMA_DEBUG
+#define DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define DBG(fmt, args...) do { } while (0)
+#endif
+
+/* CSR1 enables the transmit DMA to check for new descriptor */
+void dwmac_enable_dma_transmission(unsigned long ioaddr)
+{
+ writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
+}
+
+void dwmac_enable_dma_irq(unsigned long ioaddr)
+{
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+}
+
+void dwmac_disable_dma_irq(unsigned long ioaddr)
+{
+ writel(0, ioaddr + DMA_INTR_ENA);
+}
+
+void dwmac_dma_start_tx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value |= DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CONTROL);
+ return;
+}
+
+void dwmac_dma_stop_tx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value &= ~DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CONTROL);
+ return;
+}
+
+void dwmac_dma_start_rx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value |= DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+
+ return;
+}
+
+void dwmac_dma_stop_rx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value &= ~DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+
+ return;
+}
+
+#ifdef DWMAC_DMA_DEBUG
+static void show_tx_process_state(unsigned int status)
+{
+ unsigned int state;
+ state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- TX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- TX (Running):Fetching the Tx desc\n");
+ break;
+ case 2:
+ pr_info("- TX (Running): Waiting for end of tx\n");
+ break;
+ case 3:
+ pr_info("- TX (Running): Reading the data "
+ "and queuing the data into the Tx buf\n");
+ break;
+ case 6:
+ pr_info("- TX (Suspended): Tx Buff Underflow "
+ "or an unavailable Transmit descriptor\n");
+ break;
+ case 7:
+ pr_info("- TX (Running): Closing Tx descriptor\n");
+ break;
+ default:
+ break;
+ }
+ return;
+}
+
+static void show_rx_process_state(unsigned int status)
+{
+ unsigned int state;
+ state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- RX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- RX (Running): Fetching the Rx desc\n");
+ break;
+ case 2:
+ pr_info("- RX (Running):Checking for end of pkt\n");
+ break;
+ case 3:
+ pr_info("- RX (Running): Waiting for Rx pkt\n");
+ break;
+ case 4:
+ pr_info("- RX (Suspended): Unavailable Rx buf\n");
+ break;
+ case 5:
+ pr_info("- RX (Running): Closing Rx descriptor\n");
+ break;
+ case 6:
+ pr_info("- RX(Running): Flushing the current frame"
+ " from the Rx buf\n");
+ break;
+ case 7:
+ pr_info("- RX (Running): Queuing the Rx frame"
+ " from the Rx buf into memory\n");
+ break;
+ default:
+ break;
+ }
+ return;
+}
+#endif
+
+int dwmac_dma_interrupt(unsigned long ioaddr,
+ struct stmmac_extra_stats *x)
+{
+ int ret = 0;
+ /* read the status register (CSR5) */
+ u32 intr_status = readl(ioaddr + DMA_STATUS);
+
+ DBG(INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
+#ifdef DWMAC_DMA_DEBUG
+ /* It displays the DMA process states (CSR5 register) */
+ show_tx_process_state(intr_status);
+ show_rx_process_state(intr_status);
+#endif
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & DMA_STATUS_AIS)) {
+ DBG(INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
+ if (unlikely(intr_status & DMA_STATUS_UNF)) {
+ DBG(INFO, "transmit underflow\n");
+ ret = tx_hard_error_bump_tc;
+ x->tx_undeflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TJT)) {
+ DBG(INFO, "transmit jabber\n");
+ x->tx_jabber_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_OVF)) {
+ DBG(INFO, "recv overflow\n");
+ x->rx_overflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RU)) {
+ DBG(INFO, "receive buffer unavailable\n");
+ x->rx_buf_unav_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RPS)) {
+ DBG(INFO, "receive process stopped\n");
+ x->rx_process_stopped_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RWT)) {
+ DBG(INFO, "receive watchdog\n");
+ x->rx_watchdog_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_ETI)) {
+ DBG(INFO, "transmit early interrupt\n");
+ x->tx_early_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TPS)) {
+ DBG(INFO, "transmit process stopped\n");
+ x->tx_process_stopped_irq++;
+ ret = tx_hard_error;
+ }
+ if (unlikely(intr_status & DMA_STATUS_FBI)) {
+ DBG(INFO, "fatal bus error\n");
+ x->fatal_bus_error_irq++;
+ ret = tx_hard_error;
+ }
+ }
+ /* TX/RX NORMAL interrupts */
+ if (intr_status & DMA_STATUS_NIS) {
+ x->normal_irq_n++;
+ if (likely((intr_status & DMA_STATUS_RI) ||
+ (intr_status & (DMA_STATUS_TI))))
+ ret = handle_tx_rx;
+ }
+ /* Optional hardware blocks, interrupts should be disabled */
+ if (unlikely(intr_status &
+ (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
+ pr_info("%s: unexpected status %08x\n", __func__, intr_status);
+ /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
+ writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
+
+ DBG(INFO, "\n\n");
+ return ret;
+}
+
+
+void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low)
+{
+ unsigned long data;
+
+ data = (addr[5] << 8) | addr[4];
+ writel(data, ioaddr + high);
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + low);
+
+ return;
+}
+
+void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low)
+{
+ unsigned int hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + high);
+ lo_addr = readl(ioaddr + low);
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+
+ return;
+}
+
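For reference, the two helpers above implement a fixed byte layout: the low register carries address bytes 0-3 (byte 0 in the least significant position) and the high register carries bytes 4-5. A minimal user-space sketch of the same packing and unpacking, with the MMIO accessors left out:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack a 6-byte MAC address the way stmmac_set_mac_addr() writes it. */
static void mac_to_regs(const uint8_t addr[6], uint32_t *hi, uint32_t *lo)
{
	*hi = ((uint32_t)addr[5] << 8) | addr[4];
	*lo = ((uint32_t)addr[3] << 24) | ((uint32_t)addr[2] << 16) |
	      ((uint32_t)addr[1] << 8) | addr[0];
}

/* Unpack it again, mirroring stmmac_get_mac_addr(). */
static void regs_to_mac(uint32_t hi, uint32_t lo, uint8_t addr[6])
{
	addr[0] = lo & 0xff;
	addr[1] = (lo >> 8) & 0xff;
	addr[2] = (lo >> 16) & 0xff;
	addr[3] = (lo >> 24) & 0xff;
	addr[4] = hi & 0xff;
	addr[5] = (hi >> 8) & 0xff;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x80, 0xe1, 0x12, 0x34, 0x56 };
	uint8_t out[6];
	uint32_t hi, lo;

	mac_to_regs(mac, &hi, &lo);
	regs_to_mac(hi, lo, out);
	printf("hi=0x%08x lo=0x%08x round-trip %s\n", hi, lo,
	       memcmp(mac, out, 6) ? "failed" : "ok");
	return 0;
}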
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index 6d2eae3040e5..ba35e6943cf4 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,7 +20,8 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#define DRV_MODULE_VERSION "Oct_09"
+#define DRV_MODULE_VERSION "Jan_2010"
+#include <linux/stmmac.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define STMMAC_VLAN_TAG_USED
@@ -57,7 +58,7 @@ struct stmmac_priv {
int rx_csum;
unsigned int dma_buf_sz;
struct device *device;
- struct mac_device_info *mac_type;
+ struct mac_device_info *hw;
struct stmmac_extra_stats xstats;
struct napi_struct napi;
@@ -69,6 +70,7 @@ struct stmmac_priv {
int phy_mask;
int (*phy_reset) (void *priv);
void (*fix_mac_speed) (void *priv, unsigned int speed);
+ void (*bus_setup)(unsigned long ioaddr);
void *bsp_priv;
int phy_irq;
@@ -93,6 +95,28 @@ struct stmmac_priv {
#endif
};
+#ifdef CONFIG_STM_DRIVERS
+#include <linux/stm/pad.h>
+static inline int stmmac_claim_resource(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct plat_stmmacenet_data *plat_dat = pdev->dev.platform_data;
+
+ /* Pad routing setup */
+ if (IS_ERR(devm_stm_pad_claim(&pdev->dev, plat_dat->pad_config,
+ dev_name(&pdev->dev)))) {
+ printk(KERN_ERR "%s: Failed to request pads!\n", __func__);
+ ret = -ENODEV;
+ }
+ return ret;
+}
+#else
+static inline int stmmac_claim_resource(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
extern int stmmac_mdio_unregister(struct net_device *ndev);
extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
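The new bus_setup hook, together with the existing fix_mac_speed/bsp_priv hooks, reaches the driver through struct plat_stmmacenet_data, now picked up from <linux/stmmac.h>. A rough sketch of how a board file could populate it, limited to the fields stmmac_dvr_probe() reads in this patch; my_board_bus_setup and my_board_fix_speed are illustrative names, not part of the patch:

#include <linux/stmmac.h>

static void my_board_bus_setup(unsigned long ioaddr)
{
	/* e.g. program a bus/bridge register before the MAC core is set up */
}

static void my_board_fix_speed(void *bsp_priv, unsigned int speed)
{
	/* e.g. retune a board-level clock when the PHY renegotiates */
}

static struct plat_stmmacenet_data my_board_stmmac_data = {
	.bus_id		= 0,
	.pbl		= 32,
	.has_gmac	= 1,
	.fix_mac_speed	= my_board_fix_speed,
	.bus_setup	= my_board_bus_setup,
	.bsp_priv	= NULL,
};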
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index 694ebe6a0758..c021eaa3ca69 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -28,6 +28,7 @@
#include <linux/phy.h>
#include "stmmac.h"
+#include "dwmac_dma.h"
#define REG_SPACE_SIZE 0x1054
#define MAC100_ETHTOOL_NAME "st_mac100"
@@ -61,7 +62,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(rx_toolong),
STMMAC_STAT(rx_collision),
STMMAC_STAT(rx_crc),
- STMMAC_STAT(rx_lenght),
+ STMMAC_STAT(rx_length),
STMMAC_STAT(rx_mii),
STMMAC_STAT(rx_multicast),
STMMAC_STAT(rx_gmac_overflow),
@@ -268,8 +269,8 @@ stmmac_set_pauseparam(struct net_device *netdev,
}
} else {
unsigned long ioaddr = netdev->base_addr;
- priv->mac_type->ops->flow_ctrl(ioaddr, phy->duplex,
- priv->flow_ctrl, priv->pause);
+ priv->hw->mac->flow_ctrl(ioaddr, phy->duplex,
+ priv->flow_ctrl, priv->pause);
}
spin_unlock(&priv->lock);
return ret;
@@ -283,8 +284,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
int i;
/* Update HW stats if supported */
- priv->mac_type->ops->dma_diagnostic_fr(&dev->stats, &priv->xstats,
- ioaddr);
+ priv->hw->dma->dma_diagnostic_fr(&dev->stats, (void *) &priv->xstats,
+ ioaddr);
for (i = 0; i < STMMAC_STATS_LEN; i++) {
char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
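The table above pairs a display string with the byte offset of each counter inside the private structure, and stmmac_get_ethtool_stats() recovers the value with plain pointer arithmetic. The same offsetof-based pattern in isolation, with illustrative demo_* names:

#include <stddef.h>
#include <stdio.h>

struct demo_stats {
	unsigned long rx_length;
	unsigned long tx_underflow;
};

struct demo_priv {
	int irq;
	struct demo_stats xstats;
};

#define DEMO_STAT(m) { #m, offsetof(struct demo_priv, xstats.m) }

static const struct {
	const char *name;
	size_t off;
} demo_strings[] = {
	DEMO_STAT(rx_length),
	DEMO_STAT(tx_underflow),
};

int main(void)
{
	struct demo_priv priv = { .xstats = { 3, 1 } };
	size_t i;

	for (i = 0; i < sizeof(demo_strings) / sizeof(demo_strings[0]); i++) {
		unsigned long *p = (unsigned long *)
				   ((char *)&priv + demo_strings[i].off);
		printf("%-16s %lu\n", demo_strings[i].name, *p);
	}
	return 0;
}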
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 508fba8fa07f..a6733612d64a 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -32,7 +32,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
-#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/ip.h>
@@ -45,7 +44,6 @@
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
-#include <linux/stm/soc.h>
#include "stmmac.h"
#define STMMAC_RESOURCE_NAME "stmmaceth"
@@ -226,41 +224,38 @@ static void stmmac_adjust_link(struct net_device *dev)
if (phydev->duplex != priv->oldduplex) {
new_state = 1;
if (!(phydev->duplex))
- ctrl &= ~priv->mac_type->hw.link.duplex;
+ ctrl &= ~priv->hw->link.duplex;
else
- ctrl |= priv->mac_type->hw.link.duplex;
+ ctrl |= priv->hw->link.duplex;
priv->oldduplex = phydev->duplex;
}
/* Flow Control operation */
if (phydev->pause)
- priv->mac_type->ops->flow_ctrl(ioaddr, phydev->duplex,
- fc, pause_time);
+ priv->hw->mac->flow_ctrl(ioaddr, phydev->duplex,
+ fc, pause_time);
if (phydev->speed != priv->speed) {
new_state = 1;
switch (phydev->speed) {
case 1000:
if (likely(priv->is_gmac))
- ctrl &= ~priv->mac_type->hw.link.port;
+ ctrl &= ~priv->hw->link.port;
break;
case 100:
case 10:
if (priv->is_gmac) {
- ctrl |= priv->mac_type->hw.link.port;
+ ctrl |= priv->hw->link.port;
if (phydev->speed == SPEED_100) {
- ctrl |=
- priv->mac_type->hw.link.
- speed;
+ ctrl |= priv->hw->link.speed;
} else {
- ctrl &=
- ~(priv->mac_type->hw.
- link.speed);
+ ctrl &= ~(priv->hw->link.speed);
}
} else {
- ctrl &= ~priv->mac_type->hw.link.port;
+ ctrl &= ~priv->hw->link.port;
}
- priv->fix_mac_speed(priv->bsp_priv,
- phydev->speed);
+ if (likely(priv->fix_mac_speed))
+ priv->fix_mac_speed(priv->bsp_priv,
+ phydev->speed);
break;
default:
if (netif_msg_link(priv))
@@ -305,8 +300,8 @@ static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phydev;
- char phy_id[BUS_ID_SIZE]; /* PHY to connect */
- char bus_id[BUS_ID_SIZE];
+ char phy_id[MII_BUS_ID_SIZE + 3];
+ char bus_id[MII_BUS_ID_SIZE];
priv->oldlink = 0;
priv->speed = 0;
@@ -318,7 +313,8 @@ static int stmmac_init_phy(struct net_device *dev)
}
snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
- snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, bus_id, priv->phy_addr);
+ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+ priv->phy_addr);
pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
@@ -508,8 +504,8 @@ static void init_dma_desc_rings(struct net_device *dev)
priv->cur_tx = 0;
/* Clear the Rx/Tx descriptors */
- priv->mac_type->ops->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
- priv->mac_type->ops->init_tx_desc(priv->dma_tx, txsize);
+ priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
+ priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);
if (netif_msg_hw(priv)) {
pr_info("RX descriptor ring:\n");
@@ -544,8 +540,8 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
struct dma_desc *p = priv->dma_tx + i;
if (p->des2)
dma_unmap_single(priv->device, p->des2,
- priv->mac_type->ops->get_tx_len(p),
- DMA_TO_DEVICE);
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
dev_kfree_skb_any(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
}
@@ -575,50 +571,6 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
}
/**
- * stmmac_dma_start_tx
- * @ioaddr: device I/O address
- * Description: this function starts the DMA tx process.
- */
-static void stmmac_dma_start_tx(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + DMA_CONTROL);
- value |= DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CONTROL);
- return;
-}
-
-static void stmmac_dma_stop_tx(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + DMA_CONTROL);
- value &= ~DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CONTROL);
- return;
-}
-
-/**
- * stmmac_dma_start_rx
- * @ioaddr: device I/O address
- * Description: this function starts the DMA rx process.
- */
-static void stmmac_dma_start_rx(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + DMA_CONTROL);
- value |= DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CONTROL);
-
- return;
-}
-
-static void stmmac_dma_stop_rx(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + DMA_CONTROL);
- value &= ~DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CONTROL);
-
- return;
-}
-
-/**
* stmmac_dma_operation_mode - HW DMA operation mode
* @priv : pointer to the private device structure.
* Description: it sets the DMA operation mode: tx/rx DMA thresholds
@@ -629,18 +581,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
if (!priv->is_gmac) {
/* MAC 10/100 */
- priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 0);
+ priv->hw->dma->dma_mode(priv->dev->base_addr, tc, 0);
priv->tx_coe = NO_HW_CSUM;
} else {
if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) {
- priv->mac_type->ops->dma_mode(priv->dev->base_addr,
- SF_DMA_MODE, SF_DMA_MODE);
+ priv->hw->dma->dma_mode(priv->dev->base_addr,
+ SF_DMA_MODE, SF_DMA_MODE);
tc = SF_DMA_MODE;
priv->tx_coe = HW_CSUM;
} else {
/* Checksum computation is performed in software. */
- priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc,
- SF_DMA_MODE);
+ priv->hw->dma->dma_mode(priv->dev->base_addr, tc,
+ SF_DMA_MODE);
priv->tx_coe = NO_HW_CSUM;
}
}
@@ -649,88 +601,6 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
return;
}
-#ifdef STMMAC_DEBUG
-/**
- * show_tx_process_state
- * @status: tx descriptor status field
- * Description: it shows the Transmit Process State for CSR5[22:20]
- */
-static void show_tx_process_state(unsigned int status)
-{
- unsigned int state;
- state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
-
- switch (state) {
- case 0:
- pr_info("- TX (Stopped): Reset or Stop command\n");
- break;
- case 1:
- pr_info("- TX (Running):Fetching the Tx desc\n");
- break;
- case 2:
- pr_info("- TX (Running): Waiting for end of tx\n");
- break;
- case 3:
- pr_info("- TX (Running): Reading the data "
- "and queuing the data into the Tx buf\n");
- break;
- case 6:
- pr_info("- TX (Suspended): Tx Buff Underflow "
- "or an unavailable Transmit descriptor\n");
- break;
- case 7:
- pr_info("- TX (Running): Closing Tx descriptor\n");
- break;
- default:
- break;
- }
- return;
-}
-
-/**
- * show_rx_process_state
- * @status: rx descriptor status field
- * Description: it shows the Receive Process State for CSR5[19:17]
- */
-static void show_rx_process_state(unsigned int status)
-{
- unsigned int state;
- state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
-
- switch (state) {
- case 0:
- pr_info("- RX (Stopped): Reset or Stop command\n");
- break;
- case 1:
- pr_info("- RX (Running): Fetching the Rx desc\n");
- break;
- case 2:
- pr_info("- RX (Running):Checking for end of pkt\n");
- break;
- case 3:
- pr_info("- RX (Running): Waiting for Rx pkt\n");
- break;
- case 4:
- pr_info("- RX (Suspended): Unavailable Rx buf\n");
- break;
- case 5:
- pr_info("- RX (Running): Closing Rx descriptor\n");
- break;
- case 6:
- pr_info("- RX(Running): Flushing the current frame"
- " from the Rx buf\n");
- break;
- case 7:
- pr_info("- RX (Running): Queuing the Rx frame"
- " from the Rx buf into memory\n");
- break;
- default:
- break;
- }
- return;
-}
-#endif
-
/**
* stmmac_tx:
* @priv: private driver structure
@@ -748,16 +618,16 @@ static void stmmac_tx(struct stmmac_priv *priv)
struct dma_desc *p = priv->dma_tx + entry;
/* Check if the descriptor is owned by the DMA. */
- if (priv->mac_type->ops->get_tx_owner(p))
+ if (priv->hw->desc->get_tx_owner(p))
break;
/* Verify tx error by looking at the last segment */
- last = priv->mac_type->ops->get_tx_ls(p);
+ last = priv->hw->desc->get_tx_ls(p);
if (likely(last)) {
int tx_error =
- priv->mac_type->ops->tx_status(&priv->dev->stats,
- &priv->xstats,
- p, ioaddr);
+ priv->hw->desc->tx_status(&priv->dev->stats,
+ &priv->xstats, p,
+ ioaddr);
if (likely(tx_error == 0)) {
priv->dev->stats.tx_packets++;
priv->xstats.tx_pkt_n++;
@@ -769,7 +639,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
if (likely(p->des2))
dma_unmap_single(priv->device, p->des2,
- priv->mac_type->ops->get_tx_len(p),
+ priv->hw->desc->get_tx_len(p),
DMA_TO_DEVICE);
if (unlikely(p->des3))
p->des3 = 0;
@@ -790,7 +660,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
priv->tx_skbuff[entry] = NULL;
}
- priv->mac_type->ops->release_tx_desc(p);
+ priv->hw->desc->release_tx_desc(p);
entry = (++priv->dirty_tx) % txsize;
}
@@ -814,7 +684,7 @@ static inline void stmmac_enable_irq(struct stmmac_priv *priv)
priv->tm->timer_start(tmrate);
else
#endif
- writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA);
+ priv->hw->dma->enable_dma_irq(priv->dev->base_addr);
}
static inline void stmmac_disable_irq(struct stmmac_priv *priv)
@@ -824,7 +694,7 @@ static inline void stmmac_disable_irq(struct stmmac_priv *priv)
priv->tm->timer_stop();
else
#endif
- writel(0, priv->dev->base_addr + DMA_INTR_ENA);
+ priv->hw->dma->disable_dma_irq(priv->dev->base_addr);
}
static int stmmac_has_work(struct stmmac_priv *priv)
@@ -832,7 +702,7 @@ static int stmmac_has_work(struct stmmac_priv *priv)
unsigned int has_work = 0;
int rxret, tx_work = 0;
- rxret = priv->mac_type->ops->get_rx_owner(priv->dma_rx +
+ rxret = priv->hw->desc->get_rx_owner(priv->dma_rx +
(priv->cur_rx % priv->dma_rx_size));
if (priv->dirty_tx != priv->cur_tx)
@@ -883,12 +753,12 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
{
netif_stop_queue(priv->dev);
- stmmac_dma_stop_tx(priv->dev->base_addr);
+ priv->hw->dma->stop_tx(priv->dev->base_addr);
dma_free_tx_skbufs(priv);
- priv->mac_type->ops->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+ priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
priv->dirty_tx = 0;
priv->cur_tx = 0;
- stmmac_dma_start_tx(priv->dev->base_addr);
+ priv->hw->dma->start_tx(priv->dev->base_addr);
priv->dev->stats.tx_errors++;
netif_wake_queue(priv->dev);
@@ -896,95 +766,27 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
return;
}
-/**
- * stmmac_dma_interrupt - Interrupt handler for the driver
- * @dev: net device structure
- * Description: Interrupt handler for the driver (DMA).
- */
-static void stmmac_dma_interrupt(struct net_device *dev)
-{
- unsigned long ioaddr = dev->base_addr;
- struct stmmac_priv *priv = netdev_priv(dev);
- /* read the status register (CSR5) */
- u32 intr_status = readl(ioaddr + DMA_STATUS);
-
- DBG(intr, INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
-#ifdef STMMAC_DEBUG
- /* It displays the DMA transmit process state (CSR5 register) */
- if (netif_msg_tx_done(priv))
- show_tx_process_state(intr_status);
- if (netif_msg_rx_status(priv))
- show_rx_process_state(intr_status);
-#endif
- /* ABNORMAL interrupts */
- if (unlikely(intr_status & DMA_STATUS_AIS)) {
- DBG(intr, INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
- if (unlikely(intr_status & DMA_STATUS_UNF)) {
- DBG(intr, INFO, "transmit underflow\n");
- if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
- /* Try to bump up the threshold */
- tc += 64;
- priv->mac_type->ops->dma_mode(ioaddr, tc,
- SF_DMA_MODE);
- priv->xstats.threshold = tc;
- }
- stmmac_tx_err(priv);
- priv->xstats.tx_undeflow_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_TJT)) {
- DBG(intr, INFO, "transmit jabber\n");
- priv->xstats.tx_jabber_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_OVF)) {
- DBG(intr, INFO, "recv overflow\n");
- priv->xstats.rx_overflow_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_RU)) {
- DBG(intr, INFO, "receive buffer unavailable\n");
- priv->xstats.rx_buf_unav_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_RPS)) {
- DBG(intr, INFO, "receive process stopped\n");
- priv->xstats.rx_process_stopped_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_RWT)) {
- DBG(intr, INFO, "receive watchdog\n");
- priv->xstats.rx_watchdog_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_ETI)) {
- DBG(intr, INFO, "transmit early interrupt\n");
- priv->xstats.tx_early_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_TPS)) {
- DBG(intr, INFO, "transmit process stopped\n");
- priv->xstats.tx_process_stopped_irq++;
- stmmac_tx_err(priv);
- }
- if (unlikely(intr_status & DMA_STATUS_FBI)) {
- DBG(intr, INFO, "fatal bus error\n");
- priv->xstats.fatal_bus_error_irq++;
- stmmac_tx_err(priv);
+static void stmmac_dma_interrupt(struct stmmac_priv *priv)
+{
+ unsigned long ioaddr = priv->dev->base_addr;
+ int status;
+
+ status = priv->hw->dma->dma_interrupt(priv->dev->base_addr,
+ &priv->xstats);
+ if (likely(status == handle_tx_rx))
+ _stmmac_schedule(priv);
+
+ else if (unlikely(status == tx_hard_error_bump_tc)) {
+ /* Try to bump up the dma threshold on this failure */
+ if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
+ tc += 64;
+ priv->hw->dma->dma_mode(ioaddr, tc, SF_DMA_MODE);
+ priv->xstats.threshold = tc;
}
- }
-
- /* TX/RX NORMAL interrupts */
- if (intr_status & DMA_STATUS_NIS) {
- priv->xstats.normal_irq_n++;
- if (likely((intr_status & DMA_STATUS_RI) ||
- (intr_status & (DMA_STATUS_TI))))
- _stmmac_schedule(priv);
- }
-
- /* Optional hardware blocks, interrupts should be disabled */
- if (unlikely(intr_status &
- (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
- pr_info("%s: unexpected status %08x\n", __func__, intr_status);
-
- /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
- writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
-
- DBG(intr, INFO, "\n\n");
+ stmmac_tx_err(priv);
+ } else if (unlikely(status == tx_hard_error))
+ stmmac_tx_err(priv);
return;
}
@@ -1058,17 +860,20 @@ static int stmmac_open(struct net_device *dev)
init_dma_desc_rings(dev);
/* DMA initialization and SW reset */
- if (unlikely(priv->mac_type->ops->dma_init(ioaddr,
- priv->pbl, priv->dma_tx_phy, priv->dma_rx_phy) < 0)) {
+ if (unlikely(priv->hw->dma->init(ioaddr, priv->pbl, priv->dma_tx_phy,
+ priv->dma_rx_phy) < 0)) {
pr_err("%s: DMA initialization failed\n", __func__);
return -1;
}
/* Copy the MAC addr into the HW */
- priv->mac_type->ops->set_umac_addr(ioaddr, dev->dev_addr, 0);
+ priv->hw->mac->set_umac_addr(ioaddr, dev->dev_addr, 0);
+ /* If required, perform hw setup of the bus. */
+ if (priv->bus_setup)
+ priv->bus_setup(ioaddr);
/* Initialize the MAC Core */
- priv->mac_type->ops->core_init(ioaddr);
+ priv->hw->mac->core_init(ioaddr);
priv->shutdown = 0;
@@ -1089,16 +894,16 @@ static int stmmac_open(struct net_device *dev)
/* Start the ball rolling... */
DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
- stmmac_dma_start_tx(ioaddr);
- stmmac_dma_start_rx(ioaddr);
+ priv->hw->dma->start_tx(ioaddr);
+ priv->hw->dma->start_rx(ioaddr);
#ifdef CONFIG_STMMAC_TIMER
priv->tm->timer_start(tmrate);
#endif
/* Dump DMA/MAC registers */
if (netif_msg_hw(priv)) {
- priv->mac_type->ops->dump_mac_regs(ioaddr);
- priv->mac_type->ops->dump_dma_regs(ioaddr);
+ priv->hw->mac->dump_regs(ioaddr);
+ priv->hw->dma->dump_regs(ioaddr);
}
if (priv->phydev)
@@ -1142,8 +947,8 @@ static int stmmac_release(struct net_device *dev)
free_irq(dev->irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
- stmmac_dma_stop_tx(dev->base_addr);
- stmmac_dma_stop_rx(dev->base_addr);
+ priv->hw->dma->stop_tx(dev->base_addr);
+ priv->hw->dma->stop_rx(dev->base_addr);
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv);
@@ -1214,8 +1019,8 @@ static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
desc->des2 = dma_map_single(priv->device, skb->data,
BUF_SIZE_8KiB, DMA_TO_DEVICE);
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->mac_type->ops->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
- csum_insertion);
+ priv->hw->desc->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
+ csum_insertion);
entry = (++priv->cur_tx) % txsize;
desc = priv->dma_tx + entry;
@@ -1224,16 +1029,16 @@ static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
skb->data + BUF_SIZE_8KiB,
buf2_size, DMA_TO_DEVICE);
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->mac_type->ops->prepare_tx_desc(desc, 0,
- buf2_size, csum_insertion);
- priv->mac_type->ops->set_tx_owner(desc);
+ priv->hw->desc->prepare_tx_desc(desc, 0, buf2_size,
+ csum_insertion);
+ priv->hw->desc->set_tx_owner(desc);
priv->tx_skbuff[entry] = NULL;
} else {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
- csum_insertion);
+ priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
+ csum_insertion);
}
return entry;
}
@@ -1301,8 +1106,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int nopaged_len = skb_headlen(skb);
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
- priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
- csum_insertion);
+ priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
+ csum_insertion);
}
for (i = 0; i < nfrags; i++) {
@@ -1317,21 +1122,20 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
frag->page_offset,
len, DMA_TO_DEVICE);
priv->tx_skbuff[entry] = NULL;
- priv->mac_type->ops->prepare_tx_desc(desc, 0, len,
- csum_insertion);
- priv->mac_type->ops->set_tx_owner(desc);
+ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
+ priv->hw->desc->set_tx_owner(desc);
}
/* Interrupt on completition only for the latest segment */
- priv->mac_type->ops->close_tx_desc(desc);
+ priv->hw->desc->close_tx_desc(desc);
#ifdef CONFIG_STMMAC_TIMER
/* Clean IC while using timer */
if (likely(priv->tm->enable))
- priv->mac_type->ops->clear_tx_ic(desc);
+ priv->hw->desc->clear_tx_ic(desc);
#endif
/* To avoid raise condition */
- priv->mac_type->ops->set_tx_owner(first);
+ priv->hw->desc->set_tx_owner(first);
priv->cur_tx++;
@@ -1353,8 +1157,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += skb->len;
- /* CSR1 enables the transmit DMA to check for new descriptor */
- writel(1, dev->base_addr + DMA_XMT_POLL_DEMAND);
+ priv->hw->dma->enable_dma_transmission(dev->base_addr);
return NETDEV_TX_OK;
}
@@ -1391,7 +1194,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
}
RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
}
- priv->mac_type->ops->set_rx_owner(p + entry);
+ priv->hw->desc->set_rx_owner(p + entry);
}
return;
}
@@ -1412,7 +1215,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
}
#endif
count = 0;
- while (!priv->mac_type->ops->get_rx_owner(p)) {
+ while (!priv->hw->desc->get_rx_owner(p)) {
int status;
if (count >= limit)
@@ -1425,15 +1228,14 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
prefetch(p_next);
/* read the status of the incoming frame */
- status = (priv->mac_type->ops->rx_status(&priv->dev->stats,
- &priv->xstats, p));
+ status = (priv->hw->desc->rx_status(&priv->dev->stats,
+ &priv->xstats, p));
if (unlikely(status == discard_frame))
priv->dev->stats.rx_errors++;
else {
struct sk_buff *skb;
/* Length should omit the CRC */
- int frame_len =
- priv->mac_type->ops->get_rx_frame_len(p) - 4;
+ int frame_len = priv->hw->desc->get_rx_frame_len(p) - 4;
#ifdef STMMAC_RX_DEBUG
if (frame_len > ETH_FRAME_LEN)
@@ -1569,7 +1371,7 @@ static void stmmac_multicast_list(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
spin_lock(&priv->lock);
- priv->mac_type->ops->set_filter(dev);
+ priv->hw->mac->set_filter(dev);
spin_unlock(&priv->lock);
return;
}
@@ -1623,9 +1425,10 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
if (priv->is_gmac) {
unsigned long ioaddr = dev->base_addr;
/* To handle GMAC own interrupts */
- priv->mac_type->ops->host_irq_status(ioaddr);
+ priv->hw->mac->host_irq_status(ioaddr);
}
- stmmac_dma_interrupt(dev);
+
+ stmmac_dma_interrupt(priv);
return IRQ_HANDLED;
}
@@ -1744,7 +1547,7 @@ static int stmmac_probe(struct net_device *dev)
netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
/* Get the MAC address */
- priv->mac_type->ops->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
+ priv->hw->mac->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
if (!is_valid_ether_addr(dev->dev_addr))
pr_warning("\tno valid MAC address;"
@@ -1779,16 +1582,16 @@ static int stmmac_mac_device_setup(struct net_device *dev)
struct mac_device_info *device;
if (priv->is_gmac)
- device = gmac_setup(ioaddr);
+ device = dwmac1000_setup(ioaddr);
else
- device = mac100_setup(ioaddr);
+ device = dwmac100_setup(ioaddr);
if (!device)
return -ENOMEM;
- priv->mac_type = device;
+ priv->hw = device;
- priv->wolenabled = priv->mac_type->hw.pmt; /* PMT supported */
+ priv->wolenabled = priv->hw->pmt; /* PMT supported */
if (priv->wolenabled == PMT_SUPPORTED)
priv->wolopts = WAKE_MAGIC; /* Magic Frame */
@@ -1797,8 +1600,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
static int stmmacphy_dvr_probe(struct platform_device *pdev)
{
- struct plat_stmmacphy_data *plat_dat;
- plat_dat = (struct plat_stmmacphy_data *)((pdev->dev).platform_data);
+ struct plat_stmmacphy_data *plat_dat = pdev->dev.platform_data;
pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n",
plat_dat->bus_id);
@@ -1830,9 +1632,7 @@ static struct platform_driver stmmacphy_driver = {
static int stmmac_associate_phy(struct device *dev, void *data)
{
struct stmmac_priv *priv = (struct stmmac_priv *)data;
- struct plat_stmmacphy_data *plat_dat;
-
- plat_dat = (struct plat_stmmacphy_data *)(dev->platform_data);
+ struct plat_stmmacphy_data *plat_dat = dev->platform_data;
DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__,
plat_dat->bus_id);
@@ -1922,7 +1722,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->device = &(pdev->dev);
priv->dev = ndev;
- plat_dat = (struct plat_stmmacenet_data *)((pdev->dev).platform_data);
+ plat_dat = pdev->dev.platform_data;
priv->bus_id = plat_dat->bus_id;
priv->pbl = plat_dat->pbl; /* TLI */
priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
@@ -1932,6 +1732,11 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
/* Set the I/O base addr */
ndev->base_addr = (unsigned long)addr;
+ /* Verify embedded resource for the platform */
+ ret = stmmac_claim_resource(pdev);
+ if (ret < 0)
+ goto out;
+
/* MAC HW revice detection */
ret = stmmac_mac_device_setup(ndev);
if (ret < 0)
@@ -1952,6 +1757,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
}
priv->fix_mac_speed = plat_dat->fix_mac_speed;
+ priv->bus_setup = plat_dat->bus_setup;
priv->bsp_priv = plat_dat->bsp_priv;
pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
@@ -1986,12 +1792,13 @@ out:
static int stmmac_dvr_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
struct resource *res;
pr_info("%s:\n\tremoving driver", __func__);
- stmmac_dma_stop_rx(ndev->base_addr);
- stmmac_dma_stop_tx(ndev->base_addr);
+ priv->hw->dma->stop_rx(ndev->base_addr);
+ priv->hw->dma->stop_tx(ndev->base_addr);
stmmac_mac_disable_rx(ndev->base_addr);
stmmac_mac_disable_tx(ndev->base_addr);
@@ -2038,21 +1845,20 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
napi_disable(&priv->napi);
/* Stop TX/RX DMA */
- stmmac_dma_stop_tx(dev->base_addr);
- stmmac_dma_stop_rx(dev->base_addr);
+ priv->hw->dma->stop_tx(dev->base_addr);
+ priv->hw->dma->stop_rx(dev->base_addr);
/* Clear the Rx/Tx descriptors */
- priv->mac_type->ops->init_rx_desc(priv->dma_rx,
- priv->dma_rx_size, dis_ic);
- priv->mac_type->ops->init_tx_desc(priv->dma_tx,
- priv->dma_tx_size);
+ priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
+ dis_ic);
+ priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
stmmac_mac_disable_tx(dev->base_addr);
if (device_may_wakeup(&(pdev->dev))) {
/* Enable Power down mode by programming the PMT regs */
if (priv->wolenabled == PMT_SUPPORTED)
- priv->mac_type->ops->pmt(dev->base_addr,
- priv->wolopts);
+ priv->hw->mac->pmt(dev->base_addr,
+ priv->wolopts);
} else {
stmmac_mac_disable_rx(dev->base_addr);
}
@@ -2093,15 +1899,15 @@ static int stmmac_resume(struct platform_device *pdev)
* from another devices (e.g. serial console). */
if (device_may_wakeup(&(pdev->dev)))
if (priv->wolenabled == PMT_SUPPORTED)
- priv->mac_type->ops->pmt(dev->base_addr, 0);
+ priv->hw->mac->pmt(dev->base_addr, 0);
netif_device_attach(dev);
/* Enable the MAC and DMA */
stmmac_mac_enable_rx(ioaddr);
stmmac_mac_enable_tx(ioaddr);
- stmmac_dma_start_tx(ioaddr);
- stmmac_dma_start_rx(ioaddr);
+ priv->hw->dma->start_tx(ioaddr);
+ priv->hw->dma->start_rx(ioaddr);
#ifdef CONFIG_STMMAC_TIMER
priv->tm->timer_start(tmrate);
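After this conversion every chip-specific operation is reached through priv->hw: one MAC-ops table, one DMA-ops table, one descriptor-ops table, plus the link/mii/pmt data that used to sit under mac_type->hw. A rough reconstruction of that shape, derived only from the call sites visible in this patch; the authoritative definition lives in the driver's common header and may differ in detail:

#include <linux/netdevice.h>

struct stmmac_extra_stats;
struct stmmac_desc_ops;			/* init_rx_desc(), tx_status(), ... */

struct mii_regs {			/* offsets of the MII address/data regs */
	unsigned int addr;
	unsigned int data;
};

struct mac_link {			/* control-register bits for the link */
	int port;
	int duplex;
	int speed;
};

struct stmmac_ops {			/* priv->hw->mac->... */
	void (*core_init)(unsigned long ioaddr);
	void (*set_filter)(struct net_device *dev);
	void (*flow_ctrl)(unsigned long ioaddr, unsigned int duplex,
			  unsigned int fc, unsigned int pause_time);
	void (*pmt)(unsigned long ioaddr, unsigned long mode);
	void (*set_umac_addr)(unsigned long ioaddr, unsigned char *addr,
			      unsigned int reg_n);
	void (*get_umac_addr)(unsigned long ioaddr, unsigned char *addr,
			      unsigned int reg_n);
	void (*host_irq_status)(unsigned long ioaddr);
	void (*dump_regs)(unsigned long ioaddr);
};

struct stmmac_dma_ops {			/* priv->hw->dma->... */
	int (*init)(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
	void (*dma_mode)(unsigned long ioaddr, int txmode, int rxmode);
	void (*enable_dma_transmission)(unsigned long ioaddr);
	void (*enable_dma_irq)(unsigned long ioaddr);
	void (*disable_dma_irq)(unsigned long ioaddr);
	void (*start_tx)(unsigned long ioaddr);
	void (*stop_tx)(unsigned long ioaddr);
	void (*start_rx)(unsigned long ioaddr);
	void (*stop_rx)(unsigned long ioaddr);
	int (*dma_interrupt)(unsigned long ioaddr,
			     struct stmmac_extra_stats *x);
	void (*dump_regs)(unsigned long ioaddr);
};

struct mac_device_info {
	const struct stmmac_ops		*mac;
	const struct stmmac_dma_ops	*dma;
	const struct stmmac_desc_ops	*desc;
	struct mii_regs			mii;
	struct mac_link			link;
	unsigned int			pmt;	/* PMT_SUPPORTED when WoL works */
};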
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index 8498552a22fc..fffe1d037fe6 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -24,7 +24,6 @@
Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <linux/netdevice.h>
#include <linux/mii.h>
#include <linux/phy.h>
@@ -48,8 +47,8 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long ioaddr = ndev->base_addr;
- unsigned int mii_address = priv->mac_type->hw.mii.addr;
- unsigned int mii_data = priv->mac_type->hw.mii.data;
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
int data;
u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
@@ -80,8 +79,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long ioaddr = ndev->base_addr;
- unsigned int mii_address = priv->mac_type->hw.mii.addr;
- unsigned int mii_data = priv->mac_type->hw.mii.data;
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
u16 value =
(((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
@@ -112,7 +111,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long ioaddr = ndev->base_addr;
- unsigned int mii_address = priv->mac_type->hw.mii.addr;
+ unsigned int mii_address = priv->hw->mii.addr;
if (priv->phy_reset) {
pr_debug("stmmac_mdio_reset: calling phy_reset\n");
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index b447a8719427..efedc252e4be 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -414,7 +414,7 @@ static int init586(struct net_device *dev)
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
struct dev_mc_list *dmi=dev->mc_list;
- int num_addrs=dev->mc_count;
+ int num_addrs=netdev_mc_count(dev);
ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 0ca4241b4f63..99998862c22e 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -917,7 +917,7 @@ static void set_multicast_list( struct net_device *dev )
REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
} else {
short multicast_table[4];
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
int i;
/* We don't use the multicast table, but rely on upper-layer
* filtering. */
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 25e81ebd9cd8..dfea56fa39e3 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -1013,7 +1013,7 @@ static void bigmac_set_multicast(struct net_device *dev)
while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0)
udelay(20);
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
sbus_writel(0xffff, bregs + BMAC_HTABLE0);
sbus_writel(0xffff, bregs + BMAC_HTABLE1);
sbus_writel(0xffff, bregs + BMAC_HTABLE2);
@@ -1028,7 +1028,7 @@ static void bigmac_set_multicast(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
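These Sun drivers move from reading dev->mc_count directly to the netdev_mc_count()/netdev_mc_empty() accessors, so they keep working when the multicast bookkeeping later moves out of struct net_device. A rough sketch of the resulting filter-setup shape; set_hash_bit() and load_hash() are illustrative stand-ins for the driver-specific parts:

#include <linux/netdevice.h>
#include <linux/string.h>

static void set_hash_bit(u16 *hash, const u8 *addr) { /* driver specific */ }
static void load_hash(struct net_device *dev, const u16 *hash) { /* ditto */ }

static void demo_set_multicast(struct net_device *dev)
{
	struct dev_mc_list *dmi;
	u16 hash_table[4] = { 0, 0, 0, 0 };
	int i;

	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 64) {
		/* Too many entries: accept all multicast frames. */
		memset(hash_table, 0xff, sizeof(hash_table));
	} else if (!netdev_mc_empty(dev)) {
		for (i = 0, dmi = dev->mc_list;
		     dmi && i < netdev_mc_count(dev); i++, dmi = dmi->next)
			set_hash_bit(hash_table, dmi->dmi_addr);
	}
	load_hash(dev, hash_table);
}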
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index d58e1891ca60..4171259590b2 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -206,7 +206,7 @@ IVc. Errata
#define USE_IO_OPS 1
#endif
-static const struct pci_device_id sundance_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
@@ -1517,18 +1517,18 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *mclist;
int bit;
int index;
int crc;
memset (mc_filter, 0, sizeof (mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
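sundance and the other PCI drivers touched in this series switch their ID arrays to DEFINE_PCI_DEVICE_TABLE(). In kernels of this vintage the macro roughly expands to "const struct pci_device_id _table[] __devinitconst", i.e. it adds const and devinit-constant section placement; the exact expansion can differ slightly between versions. A minimal usage sketch with an illustrative table name:

#include <linux/module.h>
#include <linux/pci.h>

/* Roughly equivalent to:
 *   static const struct pci_device_id demo_pci_tbl[] __devinitconst = { ... };
 */
static DEFINE_PCI_DEVICE_TABLE(demo_pci_tbl) = {
	{ PCI_DEVICE(0x1186, 0x1002) },		/* vendor, device */
	{ }					/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, demo_pci_tbl);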
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index b571a1babab9..d497ec053953 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -107,7 +107,7 @@ MODULE_LICENSE("GPL");
#define GEM_MODULE_NAME "gem"
#define PFX GEM_MODULE_NAME ": "
-static struct pci_device_id gem_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = {
{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
@@ -1837,7 +1837,7 @@ static u32 gem_setup_multicast(struct gem *gp)
int i;
if ((gp->dev->flags & IFF_ALLMULTI) ||
- (gp->dev->mc_count > 256)) {
+ (netdev_mc_count(gp->dev) > 256)) {
for (i=0; i<16; i++)
writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
rxcfg |= MAC_RXCFG_HFE;
@@ -1852,7 +1852,7 @@ static u32 gem_setup_multicast(struct gem *gp)
for (i = 0; i < 16; i++)
hash_table[i] = 0;
- for (i = 0; i < gp->dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(gp->dev); i++) {
char *addrs = dmi->dmi_addr;
dmi = dmi->next;
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 6762f1c6ec8a..905df35ff78a 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -1516,7 +1516,7 @@ static int happy_meal_init(struct happy_meal *hp)
HMD(("htable, "));
if ((hp->dev->flags & IFF_ALLMULTI) ||
- (hp->dev->mc_count > 64)) {
+ (netdev_mc_count(hp->dev) > 64)) {
hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
@@ -1531,7 +1531,7 @@ static int happy_meal_init(struct happy_meal *hp)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < hp->dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(hp->dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
@@ -2373,7 +2373,7 @@ static void happy_meal_set_multicast(struct net_device *dev)
spin_lock_irq(&hp->happy_lock);
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
@@ -2387,7 +2387,7 @@ static void happy_meal_set_multicast(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
@@ -3211,7 +3211,7 @@ static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
dev_set_drvdata(&pdev->dev, NULL);
}
-static struct pci_device_id happymeal_pci_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
{ } /* Terminating entry */
};
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 64e7d08c878f..cf9d5bb9e1e9 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1196,7 +1196,7 @@ static void lance_load_multicast(struct net_device *dev)
return;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index 45c383f285ee..3bc35d86ed66 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -636,7 +636,7 @@ static void qe_set_multicast(struct net_device *dev)
/* Lock out others. */
netif_stop_queue(dev);
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
qep->mregs + MREGS_IACONFIG);
while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
@@ -653,7 +653,7 @@ static void qe_set_multicast(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index bc74db0d12f3..d65764ea1d83 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -1062,10 +1062,7 @@ static struct vnet * __devinit vnet_new(const u64 *local_mac)
goto err_out_free_dev;
}
- printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name);
-
- for (i = 0; i < 6; i++)
- printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
+ printk(KERN_INFO "%s: Sun LDOM vnet %pM\n", dev->name, dev->dev_addr);
list_add(&vp->list, &vnet_list);
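The sunvnet change uses the %pM printk extension, which takes a pointer to six bytes and renders them as a colon-separated MAC address, replacing the old per-byte loop. A minimal sketch:

#include <linux/netdevice.h>

static void demo_print_mac(struct net_device *dev)
{
	/* %pM -> "xx:xx:xx:xx:xx:xx", e.g. "eth0: MAC 00:14:4f:f8:00:01" */
	printk(KERN_INFO "%s: MAC %pM\n", dev->name, dev->dev_addr);
}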
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 75a669d48e5e..d838d4015c63 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -65,7 +65,7 @@ static const struct {
{ "TOSHIBA TC35815/TX4939" },
};
-static const struct pci_device_id tc35815_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tc35815_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF },
{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU },
{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 },
@@ -1941,18 +1941,18 @@ tc35815_set_multicast_list(struct net_device *dev)
/* Enable promiscuous mode */
tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
} else if ((dev->flags & IFF_ALLMULTI) ||
- dev->mc_count > CAM_ENTRY_MAX - 3) {
+ netdev_mc_count(dev) > CAM_ENTRY_MAX - 3) {
/* CAM 0, 1, 20 are reserved. */
/* Disable promiscuous mode, use normal mode. */
tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *cur_addr = dev->mc_list;
int i;
int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);
tc_writel(0, &tr->CAM_Ctl);
/* Walk the address list, and load the filter */
- for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
+ for (i = 0; i < netdev_mc_count(dev); i++, cur_addr = cur_addr->next) {
if (!cur_addr)
break;
/* entry 0,1 is reserved. */
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 80b404f2b938..b295b926bc45 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -62,6 +62,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "tehuti.h"
static struct pci_device_id __devinitdata bdx_pci_tbl[] = {
@@ -105,26 +107,24 @@ static void print_hw_id(struct pci_dev *pdev)
pci_read_config_word(pdev, PCI_LINK_STATUS_REG, &pci_link_status);
pci_read_config_word(pdev, PCI_DEV_CTRL_REG, &pci_ctrl);
- printk(KERN_INFO "tehuti: %s%s\n", BDX_NIC_NAME,
- nic->port_num == 1 ? "" : ", 2-Port");
- printk(KERN_INFO
- "tehuti: srom 0x%x fpga %d build %u lane# %d"
- " max_pl 0x%x mrrs 0x%x\n",
- readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF,
- readl(nic->regs + FPGA_SEED),
- GET_LINK_STATUS_LANES(pci_link_status),
- GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl));
+ pr_info("%s%s\n", BDX_NIC_NAME,
+ nic->port_num == 1 ? "" : ", 2-Port");
+ pr_info("srom 0x%x fpga %d build %u lane# %d max_pl 0x%x mrrs 0x%x\n",
+ readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF,
+ readl(nic->regs + FPGA_SEED),
+ GET_LINK_STATUS_LANES(pci_link_status),
+ GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl));
}
static void print_fw_id(struct pci_nic *nic)
{
- printk(KERN_INFO "tehuti: fw 0x%x\n", readl(nic->regs + FW_VER));
+ pr_info("fw 0x%x\n", readl(nic->regs + FW_VER));
}
static void print_eth_id(struct net_device *ndev)
{
- printk(KERN_INFO "%s: %s, Port %c\n", ndev->name, BDX_NIC_NAME,
- (ndev->if_port == 0) ? 'A' : 'B');
+ netdev_info(ndev, "%s, Port %c\n",
+ BDX_NIC_NAME, (ndev->if_port == 0) ? 'A' : 'B');
}
@@ -160,7 +160,7 @@ bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
f->va = pci_alloc_consistent(priv->pdev,
memsz + FIFO_EXTRA_SPACE, &f->da);
if (!f->va) {
- ERR("pci_alloc_consistent failed\n");
+ pr_err("pci_alloc_consistent failed\n");
RET(-ENOMEM);
}
f->reg_CFG0 = reg_CFG0;
@@ -204,13 +204,13 @@ static void bdx_link_changed(struct bdx_priv *priv)
if (netif_carrier_ok(priv->ndev)) {
netif_stop_queue(priv->ndev);
netif_carrier_off(priv->ndev);
- ERR("%s: Link Down\n", priv->ndev->name);
+ netdev_err(priv->ndev, "Link Down\n");
}
} else {
if (!netif_carrier_ok(priv->ndev)) {
netif_wake_queue(priv->ndev);
netif_carrier_on(priv->ndev);
- ERR("%s: Link Up\n", priv->ndev->name);
+ netdev_err(priv->ndev, "Link Up\n");
}
}
}
@@ -226,10 +226,10 @@ static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
bdx_link_changed(priv);
if (isr & IR_PCIE_LINK)
- ERR("%s: PCI-E Link Fault\n", priv->ndev->name);
+ netdev_err(priv->ndev, "PCI-E Link Fault\n");
if (isr & IR_PCIE_TOUT)
- ERR("%s: PCI-E Time Out\n", priv->ndev->name);
+ netdev_err(priv->ndev, "PCI-E Time Out\n");
}
@@ -345,7 +345,7 @@ out:
release_firmware(fw);
if (rc) {
- ERR("%s: firmware loading failed\n", priv->ndev->name);
+ netdev_err(priv->ndev, "firmware loading failed\n");
if (rc == -EIO)
DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n",
READ_REG(priv, regVPC),
@@ -419,9 +419,11 @@ static int bdx_hw_start(struct bdx_priv *priv)
WRITE_REG(priv, regGMAC_RXF_A, GMAC_RX_FILTER_OSEN |
GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB);
-#define BDX_IRQ_TYPE ((priv->nic->irq_type == IRQ_MSI)?0:IRQF_SHARED)
- if ((rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
- ndev->name, ndev)))
+#define BDX_IRQ_TYPE ((priv->nic->irq_type == IRQ_MSI) ? 0 : IRQF_SHARED)
+
+ rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
+ ndev->name, ndev);
+ if (rc)
goto err_irq;
bdx_enable_interrupts(priv);
@@ -462,7 +464,7 @@ static int bdx_hw_reset_direct(void __iomem *regs)
readl(regs + regRXD_CFG0_0);
return 0;
}
- ERR("tehuti: HW reset failed\n");
+ pr_err("HW reset failed\n");
return 1; /* failure */
}
@@ -486,7 +488,7 @@ static int bdx_hw_reset(struct bdx_priv *priv)
READ_REG(priv, regRXD_CFG0_0);
return 0;
}
- ERR("tehuti: HW reset failed\n");
+ pr_err("HW reset failed\n");
return 1; /* failure */
}
@@ -510,8 +512,7 @@ static int bdx_sw_reset(struct bdx_priv *priv)
mdelay(10);
}
if (i == 50)
- ERR("%s: SW reset timeout. continuing anyway\n",
- priv->ndev->name);
+ netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n");
/* 6. disable intrs */
WRITE_REG(priv, regRDINTCM0, 0);
@@ -604,18 +605,15 @@ static int bdx_open(struct net_device *ndev)
if (netif_running(ndev))
netif_stop_queue(priv->ndev);
- if ((rc = bdx_tx_init(priv)))
- goto err;
-
- if ((rc = bdx_rx_init(priv)))
- goto err;
-
- if ((rc = bdx_fw_load(priv)))
+ if ((rc = bdx_tx_init(priv)) ||
+ (rc = bdx_rx_init(priv)) ||
+ (rc = bdx_fw_load(priv)))
goto err;
bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
- if ((rc = bdx_hw_start(priv)))
+ rc = bdx_hw_start(priv);
+ if (rc)
goto err;
napi_enable(&priv->napi);
@@ -647,7 +645,7 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
if (cmd != SIOCDEVPRIVATE) {
error = copy_from_user(data, ifr->ifr_data, sizeof(data));
if (error) {
- ERR("cant copy from user\n");
+ pr_err("cant copy from user\n");
RET(error);
}
DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
@@ -708,7 +706,7 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
ENTER;
DBG2("vid=%d value=%d\n", (int)vid, enable);
if (unlikely(vid >= 4096)) {
- ERR("tehuti: invalid VID: %u (> 4096)\n", vid);
+ pr_err("invalid VID: %u (> 4096)\n", vid);
RET();
}
reg = regVLAN_0 + (vid / 32) * 4;
@@ -776,8 +774,8 @@ static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
/* enforce minimum frame size */
if (new_mtu < ETH_ZLEN) {
- ERR("%s: %s mtu %d is less then minimal %d\n",
- BDX_DRV_NAME, ndev->name, new_mtu, ETH_ZLEN);
+ netdev_err(ndev, "mtu %d is less then minimal %d\n",
+ new_mtu, ETH_ZLEN);
RET(-EINVAL);
}
@@ -808,7 +806,7 @@ static void bdx_setmulti(struct net_device *ndev)
/* set IMF to accept all multicast frmaes */
for (i = 0; i < MAC_MCST_HASH_NUM; i++)
WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
- } else if (ndev->mc_count) {
+ } else if (!netdev_mc_empty(ndev)) {
u8 hash;
struct dev_mc_list *mclist;
u32 reg, val;
@@ -840,7 +838,7 @@ static void bdx_setmulti(struct net_device *ndev)
}
} else {
- DBG("only own mac %d\n", ndev->mc_count);
+ DBG("only own mac %d\n", netdev_mc_count(ndev));
rxf_val |= GMAC_RX_FILTER_AB;
}
WRITE_REG(priv, regGMAC_RXF_A, rxf_val);
@@ -1028,17 +1026,16 @@ static int bdx_rx_init(struct bdx_priv *priv)
regRXF_CFG0_0, regRXF_CFG1_0,
regRXF_RPTR_0, regRXF_WPTR_0))
goto err_mem;
- if (!
- (priv->rxdb =
- bdx_rxdb_create(priv->rxf_fifo0.m.memsz /
- sizeof(struct rxf_desc))))
+ priv->rxdb = bdx_rxdb_create(priv->rxf_fifo0.m.memsz /
+ sizeof(struct rxf_desc));
+ if (!priv->rxdb)
goto err_mem;
priv->rxf_fifo0.m.pktsz = priv->ndev->mtu + VLAN_ETH_HLEN;
return 0;
err_mem:
- ERR("%s: %s: Rx init failed\n", BDX_DRV_NAME, priv->ndev->name);
+ netdev_err(priv->ndev, "Rx init failed\n");
return -ENOMEM;
}
@@ -1115,8 +1112,9 @@ static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
ENTER;
dno = bdx_rxdb_available(db) - 1;
while (dno > 0) {
- if (!(skb = dev_alloc_skb(f->m.pktsz + NET_IP_ALIGN))) {
- ERR("NO MEM: dev_alloc_skb failed\n");
+ skb = dev_alloc_skb(f->m.pktsz + NET_IP_ALIGN);
+ if (!skb) {
+ pr_err("NO MEM: dev_alloc_skb failed\n");
break;
}
skb->dev = priv->ndev;
@@ -1337,9 +1335,7 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
u16 rxd_vlan)
{
- DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d "
- "pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d "
- "va_lo %d va_hi %d\n",
+ DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d va_lo %d va_hi %d\n",
GET_RXD_BC(rxd_val1), GET_RXD_RXFQ(rxd_val1), GET_RXD_TO(rxd_val1),
GET_RXD_TYPE(rxd_val1), GET_RXD_ERR(rxd_val1),
GET_RXD_RXP(rxd_val1), GET_RXD_PKT_ID(rxd_val1),
@@ -1591,7 +1587,7 @@ static int bdx_tx_init(struct bdx_priv *priv)
return 0;
err_mem:
- ERR("tehuti: %s: Tx init failed\n", priv->ndev->name);
+ netdev_err(priv->ndev, "Tx init failed\n");
return -ENOMEM;
}
@@ -1609,7 +1605,7 @@ static inline int bdx_tx_space(struct bdx_priv *priv)
fsize = f->m.rptr - f->m.wptr;
if (fsize <= 0)
fsize = f->m.memsz + fsize;
- return (fsize);
+ return fsize;
}
/* bdx_tx_transmit - send packet to NIC
@@ -1937,8 +1933,9 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
RET(-ENOMEM);
/************** pci *****************/
- if ((err = pci_enable_device(pdev))) /* it trigers interrupt, dunno why. */
- goto err_pci; /* it's not a problem though */
+ err = pci_enable_device(pdev);
+ if (err) /* it triggers interrupt, dunno why. */
+ goto err_pci; /* it's not a problem though */
if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
!(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
@@ -1946,14 +1943,14 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else {
if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
- printk(KERN_ERR "tehuti: No usable DMA configuration"
- ", aborting\n");
+ pr_err("No usable DMA configuration, aborting\n");
goto err_dma;
}
pci_using_dac = 0;
}
- if ((err = pci_request_regions(pdev, BDX_DRV_NAME)))
+ err = pci_request_regions(pdev, BDX_DRV_NAME);
+ if (err)
goto err_dma;
pci_set_master(pdev);
@@ -1961,25 +1958,26 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pciaddr = pci_resource_start(pdev, 0);
if (!pciaddr) {
err = -EIO;
- ERR("tehuti: no MMIO resource\n");
+ pr_err("no MMIO resource\n");
goto err_out_res;
}
- if ((regionSize = pci_resource_len(pdev, 0)) < BDX_REGS_SIZE) {
+ regionSize = pci_resource_len(pdev, 0);
+ if (regionSize < BDX_REGS_SIZE) {
err = -EIO;
- ERR("tehuti: MMIO resource (%x) too small\n", regionSize);
+ pr_err("MMIO resource (%x) too small\n", regionSize);
goto err_out_res;
}
nic->regs = ioremap(pciaddr, regionSize);
if (!nic->regs) {
err = -EIO;
- ERR("tehuti: ioremap failed\n");
+ pr_err("ioremap failed\n");
goto err_out_res;
}
if (pdev->irq < 2) {
err = -EIO;
- ERR("tehuti: invalid irq (%d)\n", pdev->irq);
+ pr_err("invalid irq (%d)\n", pdev->irq);
goto err_out_iomap;
}
pci_set_drvdata(pdev, nic);
@@ -1996,8 +1994,9 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
nic->irq_type = IRQ_INTX;
#ifdef BDX_MSI
if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
- if ((err = pci_enable_msi(pdev)))
- ERR("Tehuti: Can't eneble msi. error is %d\n", err);
+ err = pci_enable_msi(pdev);
+ if (err)
+ pr_err("Can't eneble msi. error is %d\n", err);
else
nic->irq_type = IRQ_MSI;
} else
@@ -2006,9 +2005,10 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/************** netdev **************/
for (port = 0; port < nic->port_num; port++) {
- if (!(ndev = alloc_etherdev(sizeof(struct bdx_priv)))) {
+ ndev = alloc_etherdev(sizeof(struct bdx_priv));
+ if (!ndev) {
err = -ENOMEM;
- printk(KERN_ERR "tehuti: alloc_etherdev failed\n");
+ pr_err("alloc_etherdev failed\n");
goto err_out_iomap;
}
@@ -2075,12 +2075,13 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/*bdx_hw_reset(priv); */
if (bdx_read_mac(priv)) {
- printk(KERN_ERR "tehuti: load MAC address failed\n");
+ pr_err("load MAC address failed\n");
goto err_out_iomap;
}
SET_NETDEV_DEV(ndev, &pdev->dev);
- if ((err = register_netdev(ndev))) {
- printk(KERN_ERR "tehuti: register_netdev failed\n");
+ err = register_netdev(ndev);
+ if (err) {
+ pr_err("register_netdev failed\n");
goto err_out_free;
}
netif_carrier_off(ndev);
@@ -2294,13 +2295,13 @@ bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
/* Convert RX fifo size to number of pending packets */
static inline int bdx_rx_fifo_size_to_packets(int rx_size)
{
- return ((FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc));
+ return (FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc);
}
/* Convert TX fifo size to number of pending packets */
static inline int bdx_tx_fifo_size_to_packets(int tx_size)
{
- return ((FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ);
+ return (FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ;
}
/*
@@ -2392,10 +2393,10 @@ static int bdx_get_sset_count(struct net_device *netdev, int stringset)
case ETH_SS_STATS:
BDX_ASSERT(ARRAY_SIZE(bdx_stat_names)
!= sizeof(struct bdx_stats) / sizeof(u64));
- return ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0);
- default:
- return -EINVAL;
+ return (priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0;
}
+
+ return -EINVAL;
}
/*
@@ -2493,10 +2494,8 @@ static struct pci_driver bdx_pci_driver = {
*/
static void __init print_driver_id(void)
{
- printk(KERN_INFO "%s: %s, %s\n", BDX_DRV_NAME, BDX_DRV_DESC,
- BDX_DRV_VERSION);
- printk(KERN_INFO "%s: Options: hw_csum %s\n", BDX_DRV_NAME,
- BDX_MSI_STRING);
+ pr_info("%s, %s\n", BDX_DRV_DESC, BDX_DRV_VERSION);
+ pr_info("Options: hw_csum %s\n", BDX_MSI_STRING);
}
static int __init bdx_module_init(void)
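The tehuti conversion leans on pr_fmt(): when a file defines it before its includes, every pr_err()/pr_info() in that file expands with the chosen prefix, which is why the literal "tehuti: " strings can be dropped from the messages. A short sketch of the mechanism:

/* pr_info(fmt, ...) is essentially printk(KERN_INFO pr_fmt(fmt), ...),
 * and pr_fmt() is a no-op unless the file overrides it first. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

static void demo(void)
{
	pr_info("fw 0x%x\n", 0x1234);
	/* emits "tehuti: fw 0x1234" when built into tehuti.ko */
}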
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index 124141909e42..a19dcf8b6b56 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -529,28 +529,34 @@ struct txd_desc {
/* Debugging Macros */
-#define ERR(fmt, args...) printk(KERN_ERR fmt, ## args)
-#define DBG2(fmt, args...) \
- printk(KERN_ERR "%s:%-5d: " fmt, __func__, __LINE__, ## args)
+#define DBG2(fmt, args...) \
+ pr_err("%s:%-5d: " fmt, __func__, __LINE__, ## args)
#define BDX_ASSERT(x) BUG_ON(x)
#ifdef DEBUG
-#define ENTER do { \
- printk(KERN_ERR "%s:%-5d: ENTER\n", __func__, __LINE__); \
+#define ENTER \
+do { \
+ pr_err("%s:%-5d: ENTER\n", __func__, __LINE__); \
} while (0)
-#define RET(args...) do { \
- printk(KERN_ERR "%s:%-5d: RETURN\n", __func__, __LINE__); \
-return args; } while (0)
+#define RET(args...) \
+do { \
+ pr_err("%s:%-5d: RETURN\n", __func__, __LINE__); \
+ return args; \
+} while (0)
-#define DBG(fmt, args...) \
- printk(KERN_ERR "%s:%-5d: " fmt, __func__, __LINE__, ## args)
+#define DBG(fmt, args...) \
+ pr_err("%s:%-5d: " fmt, __func__, __LINE__, ## args)
#else
-#define ENTER do { } while (0)
+#define ENTER do { } while (0)
#define RET(args...) return args
-#define DBG(fmt, args...) do { } while (0)
+#define DBG(fmt, args...) \
+do { \
+ if (0) \
+ pr_err(fmt, ##args); \
+} while (0)
#endif
#endif /* _BDX__H */
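The non-DEBUG DBG() above changes from an empty statement to an if (0) around pr_err(): the compiler still type-checks the format string and treats the arguments as used, which helps avoid unused-variable warnings in non-debug builds, yet the branch is trivially optimised away. The same pattern in a standalone sketch using fprintf() in place of pr_err():

#include <stdio.h>

#ifdef DEBUG
#define DBG(fmt, args...)	fprintf(stderr, fmt, ##args)
#else
/* Arguments stay visible to the compiler, but the branch is dead code. */
#define DBG(fmt, args...) \
do { \
	if (0) \
		fprintf(stderr, fmt, ##args); \
} while (0)
#endif

int main(void)
{
	int ring = 3;

	DBG("ring %d initialised\n", ring);	/* compiled out unless DEBUG */
	return 0;
}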
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7f82b0238e08..385434ff3960 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.106"
-#define DRV_MODULE_RELDATE "January 12, 2010"
+#define DRV_MODULE_VERSION "3.107"
+#define DRV_MODULE_RELDATE "February 12, 2010"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -174,7 +174,7 @@ static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
-static struct pci_device_id tg3_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
@@ -244,6 +244,12 @@ static struct pci_device_id tg3_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -636,7 +642,6 @@ static void tg3_disable_ints(struct tg3 *tp)
static void tg3_enable_ints(struct tg3 *tp)
{
int i;
- u32 coal_now = 0;
tp->irq_sync = 0;
wmb();
@@ -644,13 +649,14 @@ static void tg3_enable_ints(struct tg3 *tp)
tw32(TG3PCI_MISC_HOST_CTRL,
(tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
+ tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
for (i = 0; i < tp->irq_cnt; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
- coal_now |= tnapi->coal_now;
+ tp->coal_now |= tnapi->coal_now;
}
/* Force an initial interrupt */
@@ -658,8 +664,9 @@ static void tg3_enable_ints(struct tg3 *tp)
(tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
else
- tw32(HOSTCC_MODE, tp->coalesce_mode |
- HOSTCC_MODE_ENABLE | coal_now);
+ tw32(HOSTCC_MODE, tp->coal_now);
+
+ tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
@@ -1564,7 +1571,9 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
u32 reg;
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
return;
if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
@@ -1939,6 +1948,10 @@ static int tg3_phy_reset(struct tg3 *tp)
}
}
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
+ return 0;
+
tg3_phy_apply_otp(tp);
if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
@@ -2019,7 +2032,9 @@ static void tg3_frob_aux_power(struct tg3 *tp)
{
struct tg3 *tp_peer = tp;
- if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
+ /* The GPIOs do something completely different on 57765. */
+ if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
return;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
@@ -4538,6 +4553,12 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
pci_unmap_addr(src_map, mapping));
dest_desc->addr_hi = src_desc->addr_hi;
dest_desc->addr_lo = src_desc->addr_lo;
+
+ /* Ensure that the update to the skb happens after the physical
+ * addresses have been transferred to the new BD location.
+ */
+ smp_wmb();
+
src_map->skb = NULL;
}
@@ -4719,7 +4740,7 @@ next_pkt_nopost:
tw32_rx_mbox(tnapi->consmbox, sw_idx);
/* Refill RX ring(s). */
- if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) {
+ if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
if (work_mask & RXD_OPAQUE_RING_STD) {
tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
@@ -4741,7 +4762,8 @@ next_pkt_nopost:
tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
- napi_schedule(&tp->napi[1].napi);
+ if (tnapi != &tp->napi[1])
+ napi_schedule(&tp->napi[1].napi);
}
return received;
@@ -4773,12 +4795,12 @@ static void tg3_poll_link(struct tg3 *tp)
}
}
-static void tg3_rx_prodring_xfer(struct tg3 *tp,
- struct tg3_rx_prodring_set *dpr,
- struct tg3_rx_prodring_set *spr)
+static int tg3_rx_prodring_xfer(struct tg3 *tp,
+ struct tg3_rx_prodring_set *dpr,
+ struct tg3_rx_prodring_set *spr)
{
u32 si, di, cpycnt, src_prod_idx;
- int i;
+ int i, err = 0;
while (1) {
src_prod_idx = spr->rx_std_prod_idx;
@@ -4801,6 +4823,23 @@ static void tg3_rx_prodring_xfer(struct tg3 *tp,
si = spr->rx_std_cons_idx;
di = dpr->rx_std_prod_idx;
+ for (i = di; i < di + cpycnt; i++) {
+ if (dpr->rx_std_buffers[i].skb) {
+ cpycnt = i - di;
+ err = -ENOSPC;
+ break;
+ }
+ }
+
+ if (!cpycnt)
+ break;
+
+ /* Ensure that updates to the rx_std_buffers ring and the
+ * shadowed hardware producer ring from tg3_recycle_skb() are
+ * ordered correctly WRT the skb check above.
+ */
+ smp_rmb();
+
memcpy(&dpr->rx_std_buffers[di],
&spr->rx_std_buffers[si],
cpycnt * sizeof(struct ring_info));
@@ -4841,6 +4880,23 @@ static void tg3_rx_prodring_xfer(struct tg3 *tp,
si = spr->rx_jmb_cons_idx;
di = dpr->rx_jmb_prod_idx;
+ for (i = di; i < di + cpycnt; i++) {
+ if (dpr->rx_jmb_buffers[i].skb) {
+ cpycnt = i - di;
+ err = -ENOSPC;
+ break;
+ }
+ }
+
+ if (!cpycnt)
+ break;
+
+ /* Ensure that updates to the rx_jmb_buffers ring and the
+ * shadowed hardware producer ring from tg3_recycle_skb() are
+ * ordered correctly WRT the skb check above.
+ */
+ smp_rmb();
+
memcpy(&dpr->rx_jmb_buffers[di],
&spr->rx_jmb_buffers[si],
cpycnt * sizeof(struct ring_info));
@@ -4858,6 +4914,8 @@ static void tg3_rx_prodring_xfer(struct tg3 *tp,
dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
TG3_RX_JUMBO_RING_SIZE;
}
+
+ return err;
}
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
@@ -4879,27 +4937,29 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
work_done += tg3_rx(tnapi, budget - work_done);
if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
- int i;
- u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx;
- u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx;
+ struct tg3_rx_prodring_set *dpr = &tp->prodring[0];
+ int i, err = 0;
+ u32 std_prod_idx = dpr->rx_std_prod_idx;
+ u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
- for (i = 2; i < tp->irq_cnt; i++)
- tg3_rx_prodring_xfer(tp, tnapi->prodring,
- tp->napi[i].prodring);
+ for (i = 1; i < tp->irq_cnt; i++)
+ err |= tg3_rx_prodring_xfer(tp, dpr,
+ tp->napi[i].prodring);
wmb();
- if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) {
- u32 mbox = TG3_RX_STD_PROD_IDX_REG;
- tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx);
- }
+ if (std_prod_idx != dpr->rx_std_prod_idx)
+ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+ dpr->rx_std_prod_idx);
- if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) {
- u32 mbox = TG3_RX_JMB_PROD_IDX_REG;
- tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx);
- }
+ if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
+ tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
+ dpr->rx_jmb_prod_idx);
mmiowb();
+
+ if (err)
+ tw32_f(HOSTCC_MODE, tp->coal_now);
}
return work_done;
@@ -6159,8 +6219,7 @@ static void tg3_free_rings(struct tg3 *tp)
dev_kfree_skb_any(skb);
}
- if (tp->irq_cnt == 1 || j != tp->irq_cnt - 1)
- tg3_rx_prodring_free(tp, &tp->prodring[j]);
+ tg3_rx_prodring_free(tp, &tp->prodring[j]);
}
}
@@ -6196,9 +6255,10 @@ static int tg3_init_rings(struct tg3 *tp)
if (tnapi->rx_rcb)
memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
- if ((tp->irq_cnt == 1 || i != tp->irq_cnt - 1) &&
- tg3_rx_prodring_alloc(tp, &tp->prodring[i]))
+ if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) {
+ tg3_free_rings(tp);
return -ENOMEM;
+ }
}
return 0;
@@ -6245,7 +6305,7 @@ static void tg3_free_consistent(struct tg3 *tp)
tp->hw_stats = NULL;
}
- for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++)
+ for (i = 0; i < tp->irq_cnt; i++)
tg3_rx_prodring_fini(tp, &tp->prodring[i]);
}
@@ -6257,7 +6317,7 @@ static int tg3_alloc_consistent(struct tg3 *tp)
{
int i;
- for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) {
+ for (i = 0; i < tp->irq_cnt; i++) {
if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
goto err_out;
}
@@ -6322,10 +6382,7 @@ static int tg3_alloc_consistent(struct tg3 *tp)
break;
}
- if (tp->irq_cnt == 1)
- tnapi->prodring = &tp->prodring[0];
- else if (i)
- tnapi->prodring = &tp->prodring[i - 1];
+ tnapi->prodring = &tp->prodring[i];
/*
* If multivector RSS is enabled, vector 0 does not handle
@@ -6664,6 +6721,13 @@ static int tg3_poll_fw(struct tg3 *tp)
tp->dev->name);
}
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ /* The 57765 A0 needs a little more
+ * time to do some important work.
+ */
+ mdelay(10);
+ }
+
return 0;
}
@@ -7439,10 +7503,13 @@ static void tg3_rings_reset(struct tg3 *tp)
for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
tp->napi[i].tx_prod = 0;
tp->napi[i].tx_cons = 0;
- tw32_mailbox(tp->napi[i].prodmbox, 0);
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
+ tw32_mailbox(tp->napi[i].prodmbox, 0);
tw32_rx_mbox(tp->napi[i].consmbox, 0);
tw32_mailbox_f(tp->napi[i].int_mbox, 1);
}
+ if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
+ tw32_mailbox(tp->napi[0].prodmbox, 0);
} else {
tp->napi[0].tx_prod = 0;
tp->napi[0].tx_cons = 0;
@@ -7528,8 +7595,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tg3_abort_hw(tp, 1);
}
- if (reset_phy &&
- !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
+ if (reset_phy)
tg3_phy_reset(tp);
err = tg3_chip_reset(tp);
@@ -7574,6 +7640,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
}
+ if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
+ u32 grc_mode = tr32(GRC_MODE);
+
+ /* Access the lower 1K of PL PCIE block registers. */
+ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+ tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+
+ val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
+ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
+ val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
+
+ tw32(GRC_MODE, grc_mode);
+ }
+
/* This works around an issue with Athlon chipsets on
* B3 tigon3 silicon. This bit has no effect on any
* other revision. But do not set this on PCI Express
@@ -7772,7 +7852,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
(RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
BDINFO_FLAGS_USE_EXT_RECV);
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_JUMBO_BUFFER_DESC);
} else {
@@ -7834,6 +7914,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
RDMAC_MODE_LNGREAD_ENAB);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
@@ -8143,7 +8226,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Prevent chip from dropping frames when flow control
* is enabled.
*/
- tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ val = 1;
+ else
+ val = 2;
+ tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
@@ -9395,7 +9482,7 @@ static void __tg3_set_rx_mode(struct net_device *dev)
} else if (dev->flags & IFF_ALLMULTI) {
/* Accept all multicast. */
tg3_set_multi (tp, 1);
- } else if (dev->mc_count < 1) {
+ } else if (netdev_mc_empty(dev)) {
/* Reject all multicast. */
tg3_set_multi (tp, 0);
} else {
@@ -9407,7 +9494,7 @@ static void __tg3_set_rx_mode(struct net_device *dev)
u32 bit;
u32 crc;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
@@ -10640,12 +10727,27 @@ static int tg3_test_memory(struct tg3 *tp)
{ 0x00008000, 0x01000},
{ 0x00010000, 0x01000},
{ 0xffffffff, 0x00000}
+ }, mem_tbl_5717[] = {
+ { 0x00000200, 0x00008},
+ { 0x00010000, 0x0a000},
+ { 0x00020000, 0x13c00},
+ { 0xffffffff, 0x00000}
+ }, mem_tbl_57765[] = {
+ { 0x00000200, 0x00008},
+ { 0x00004000, 0x00800},
+ { 0x00006000, 0x09800},
+ { 0x00010000, 0x0a000},
+ { 0xffffffff, 0x00000}
};
struct mem_entry *mem_tbl;
int err = 0;
int i;
- if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ mem_tbl = mem_tbl_5717;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ mem_tbl = mem_tbl_57765;
+ else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
mem_tbl = mem_tbl_5755;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
mem_tbl = mem_tbl_5906;
@@ -10678,12 +10780,12 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
struct tg3_napi *tnapi, *rnapi;
struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+ tnapi = &tp->napi[0];
+ rnapi = &tp->napi[0];
if (tp->irq_cnt > 1) {
- tnapi = &tp->napi[1];
rnapi = &tp->napi[1];
- } else {
- tnapi = &tp->napi[0];
- rnapi = &tp->napi[0];
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
+ tnapi = &tp->napi[1];
}
coal_now = tnapi->coal_now | rnapi->coal_now;
@@ -10720,8 +10822,12 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
- tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800);
+ tg3_writephy(tp, MII_TG3_FET_PTEST,
+ MII_TG3_FET_PTEST_FRC_TX_LINK |
+ MII_TG3_FET_PTEST_FRC_TX_LOCK);
+ /* The write needs to be flushed for the AC131 */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
mac_mode |= MAC_MODE_PORT_MODE_MII;
} else
mac_mode |= MAC_MODE_PORT_MODE_GMII;
@@ -13102,6 +13208,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
+ } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
+ tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
}
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
@@ -13290,7 +13398,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
@@ -14086,9 +14195,22 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
- if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ tp->bufmgr_config.mbuf_read_dma_low_water =
+ DEFAULT_MB_RDMA_LOW_WATER_5705;
+ tp->bufmgr_config.mbuf_mac_rx_low_water =
+ DEFAULT_MB_MACRX_LOW_WATER_57765;
+ tp->bufmgr_config.mbuf_high_water =
+ DEFAULT_MB_HIGH_WATER_57765;
+
+ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
+ DEFAULT_MB_RDMA_LOW_WATER_5705;
+ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
+ DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
+ tp->bufmgr_config.mbuf_high_water_jumbo =
+ DEFAULT_MB_HIGH_WATER_JUMBO_57765;
+ } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
tp->bufmgr_config.mbuf_read_dma_low_water =
DEFAULT_MB_RDMA_LOW_WATER_5705;
tp->bufmgr_config.mbuf_mac_rx_low_water =
@@ -14148,7 +14270,9 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
case PHY_ID_BCM5756: return "5722/5756";
case PHY_ID_BCM5906: return "5906";
case PHY_ID_BCM5761: return "5761";
- case PHY_ID_BCM5717: return "5717";
+ case PHY_ID_BCM5718C: return "5718C";
+ case PHY_ID_BCM5718S: return "5718S";
+ case PHY_ID_BCM57765: return "57765";
case PHY_ID_BCM8002: return "8002/serdes";
case 0: return "serdes";
default: return "unknown";
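
The smp_wmb() added in tg3_recycle_rx() and the smp_rmb() added in tg3_rx_prodring_xfer() above form a producer/consumer barrier pair: the producer publishes the copied descriptor data before clearing the skb pointer that marks the slot, and the consumer must not read that data until after its own skb check. The sketch below is a deliberately simplified userspace model of that flag-then-payload ordering using C11 fences; it is not the actual tg3 ring logic, and the struct and names are invented for illustration:

#include <stdatomic.h>
#include <stdio.h>

struct slot {
        unsigned long dma_addr;         /* payload guarded by the flag below */
        _Atomic(void *) skb;            /* non-NULL = slot still owned elsewhere */
};

/* Producer side (cf. the smp_wmb() in tg3_recycle_rx): make the payload
 * visible first, then release the slot by clearing the flag. */
static void slot_publish(struct slot *s, unsigned long addr)
{
        s->dma_addr = addr;
        atomic_thread_fence(memory_order_release);      /* ~ smp_wmb() */
        atomic_store_explicit(&s->skb, NULL, memory_order_relaxed);
}

/* Consumer side (cf. the smp_rmb() in tg3_rx_prodring_xfer): test the flag
 * first, and only read the payload after a read barrier. */
static int slot_consume(struct slot *s, unsigned long *addr)
{
        if (atomic_load_explicit(&s->skb, memory_order_relaxed))
                return -1;                              /* still busy, like -ENOSPC */
        atomic_thread_fence(memory_order_acquire);      /* ~ smp_rmb() */
        *addr = s->dma_addr;
        return 0;
}

int main(void)
{
        static int dummy_skb;
        struct slot s = { .dma_addr = 0, .skb = &dummy_skb };
        unsigned long addr;

        slot_publish(&s, 0x1000);
        if (!slot_consume(&s, &addr))
                printf("got payload 0x%lx\n", addr);
        return 0;
}

Dropping either fence allows the compiler or CPU to reorder the payload access around the flag, which is the kind of reordering the driver comments above guard against.
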
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 8a167912902b..b4fd59623cfb 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -110,6 +110,7 @@
#define CHIPREV_ID_57780_A0 0x57780000
#define CHIPREV_ID_57780_A1 0x57780001
#define CHIPREV_ID_5717_A0 0x05717000
+#define CHIPREV_ID_57765_A0 0x57785000
#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
#define ASIC_REV_5700 0x07
#define ASIC_REV_5701 0x00
@@ -1206,14 +1207,18 @@
#define DEFAULT_MB_MACRX_LOW_WATER 0x00000020
#define DEFAULT_MB_MACRX_LOW_WATER_5705 0x00000010
#define DEFAULT_MB_MACRX_LOW_WATER_5906 0x00000004
+#define DEFAULT_MB_MACRX_LOW_WATER_57765 0x0000002a
#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO 0x00000098
#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780 0x0000004b
+#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765 0x0000007e
#define BUFMGR_MB_HIGH_WATER 0x00004418
#define DEFAULT_MB_HIGH_WATER 0x00000060
#define DEFAULT_MB_HIGH_WATER_5705 0x00000060
#define DEFAULT_MB_HIGH_WATER_5906 0x00000010
+#define DEFAULT_MB_HIGH_WATER_57765 0x000000a0
#define DEFAULT_MB_HIGH_WATER_JUMBO 0x0000017c
#define DEFAULT_MB_HIGH_WATER_JUMBO_5780 0x00000096
+#define DEFAULT_MB_HIGH_WATER_JUMBO_57765 0x000000ea
#define BUFMGR_RX_MB_ALLOC_REQ 0x0000441c
#define BUFMGR_MB_ALLOC_BIT 0x10000000
#define BUFMGR_RX_MB_ALLOC_RESP 0x00004420
@@ -1253,6 +1258,7 @@
#define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000
#define RDMAC_MODE_FIFO_SIZE_128 0x00020000
#define RDMAC_MODE_FIFO_LONG_BURST 0x00030000
+#define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000
#define RDMAC_MODE_IPV4_LSO_EN 0x08000000
#define RDMAC_MODE_IPV6_LSO_EN 0x10000000
#define RDMAC_STATUS 0x00004804
@@ -1543,6 +1549,8 @@
#define GRC_MODE_HOST_SENDBDS 0x00020000
#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000
#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000
+#define GRC_MODE_PCIE_TL_SEL 0x00000000
+#define GRC_MODE_PCIE_PL_SEL 0x00400000
#define GRC_MODE_NO_RX_PHDR_CSUM 0x00800000
#define GRC_MODE_IRQ_ON_TX_CPU_ATTN 0x01000000
#define GRC_MODE_IRQ_ON_RX_CPU_ATTN 0x02000000
@@ -1550,7 +1558,13 @@
#define GRC_MODE_IRQ_ON_DMA_ATTN 0x08000000
#define GRC_MODE_IRQ_ON_FLOW_ATTN 0x10000000
#define GRC_MODE_4X_NIC_SEND_RINGS 0x20000000
+#define GRC_MODE_PCIE_DL_SEL 0x20000000
#define GRC_MODE_MCAST_FRM_ENABLE 0x40000000
+#define GRC_MODE_PCIE_HI_1K_EN 0x80000000
+#define GRC_MODE_PCIE_PORT_MASK (GRC_MODE_PCIE_TL_SEL | \
+ GRC_MODE_PCIE_PL_SEL | \
+ GRC_MODE_PCIE_DL_SEL | \
+ GRC_MODE_PCIE_HI_1K_EN)
#define GRC_MISC_CFG 0x00006804
#define GRC_MISC_CFG_CORECLK_RESET 0x00000001
#define GRC_MISC_CFG_PRESCALAR_MASK 0x000000fe
@@ -1804,6 +1818,11 @@
/* 0x7e74 --> 0x8000 unused */
+/* Alternate PCIE definitions */
+#define TG3_PCIE_TLDLPL_PORT 0x00007c00
+#define TG3_PCIE_PL_LO_PHYCTL1 0x00000004
+#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000
+
/* OTP bit definitions */
#define TG3_OTP_AGCTGT_MASK 0x000000e0
#define TG3_OTP_AGCTGT_SHIFT 1
@@ -2093,6 +2112,9 @@
/* Fast Ethernet Tranceiver definitions */
#define MII_TG3_FET_PTEST 0x17
+#define MII_TG3_FET_PTEST_FRC_TX_LINK 0x1000
+#define MII_TG3_FET_PTEST_FRC_TX_LOCK 0x0800
+
#define MII_TG3_FET_TEST 0x1f
#define MII_TG3_FET_SHADOW_EN 0x0080
@@ -2682,6 +2704,7 @@ struct tg3 {
struct net_device *dev;
struct pci_dev *pdev;
+ u32 coal_now;
u32 msg_enable;
/* begin "tx thread" cacheline section */
@@ -2700,7 +2723,7 @@ struct tg3 {
struct vlan_group *vlgrp;
#endif
- struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS - 1];
+ struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS];
/* begin "everything else" cacheline(s) section */
@@ -2812,6 +2835,7 @@ struct tg3 {
#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000
#define TG3_FLG3_SHORT_DMA_BUG 0x00200000
#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000
+#define TG3_FLG3_L1PLLPD_EN 0x00800000
struct timer_list timer;
u16 timer_counter;
@@ -2878,7 +2902,9 @@ struct tg3 {
#define PHY_ID_BCM5756 0xbc050ed0
#define PHY_ID_BCM5784 0xbc050fa0
#define PHY_ID_BCM5761 0xbc050fd0
-#define PHY_ID_BCM5717 0x5c0d8a00
+#define PHY_ID_BCM5718C 0x5c0d8a00
+#define PHY_ID_BCM5718S 0xbc050ff0
+#define PHY_ID_BCM57765 0x5c0d8a40
#define PHY_ID_BCM5906 0xdc00ac40
#define PHY_ID_BCM8002 0x60010140
#define PHY_ID_INVALID 0xffffffff
@@ -2921,7 +2947,8 @@ struct tg3 {
(X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \
(X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \
(X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \
- (X) == PHY_ID_BCM5717 || (X) == PHY_ID_BCM8002)
+ (X) == PHY_ID_BCM5718C || (X) == PHY_ID_BCM5718S || \
+ (X) == PHY_ID_BCM57765 || (X) == PHY_ID_BCM8002)
struct tg3_hw_stats *hw_stats;
dma_addr_t stats_mapping;
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index fabaeffb3155..e44d5a074c69 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -254,7 +254,7 @@ static struct board {
{ "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
};
-static struct pci_device_id tlan_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
@@ -338,7 +338,7 @@ static int TLan_PhyInternalService( struct net_device * );
static int TLan_PhyDp83840aCheck( struct net_device * );
*/
-static int TLan_MiiReadReg( struct net_device *, u16, u16, u16 * );
+static bool TLan_MiiReadReg( struct net_device *, u16, u16, u16 * );
static void TLan_MiiSendData( u16, u32, unsigned );
static void TLan_MiiSync( u16 );
static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 );
@@ -1335,7 +1335,7 @@ static void TLan_SetMulticastList( struct net_device *dev )
TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF );
TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
} else {
- for ( i = 0; i < dev->mc_count; i++ ) {
+ for ( i = 0; i < netdev_mc_count(dev); i++ ) {
if ( i < 3 ) {
TLan_SetMac( dev, i + 1,
(char *) &dmi->dmi_addr );
@@ -2204,7 +2204,7 @@ TLan_ResetAdapter( struct net_device *dev )
u32 data;
u8 data8;
- priv->tlanFullDuplex = FALSE;
+ priv->tlanFullDuplex = false;
priv->phyOnline=0;
netif_carrier_off(dev);
@@ -2259,7 +2259,7 @@ TLan_ResetAdapter( struct net_device *dev )
TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a );
} else if ( priv->duplex == TLAN_DUPLEX_FULL ) {
TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 );
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
} else {
TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 );
}
@@ -2651,14 +2651,14 @@ static void TLan_PhyStartLink( struct net_device *dev )
TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000);
} else if ( priv->speed == TLAN_SPEED_10 &&
priv->duplex == TLAN_DUPLEX_FULL) {
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100);
} else if ( priv->speed == TLAN_SPEED_100 &&
priv->duplex == TLAN_DUPLEX_HALF) {
TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000);
} else if ( priv->speed == TLAN_SPEED_100 &&
priv->duplex == TLAN_DUPLEX_FULL) {
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100);
} else {
@@ -2695,7 +2695,7 @@ static void TLan_PhyStartLink( struct net_device *dev )
tctl &= ~TLAN_TC_AUISEL;
if ( priv->duplex == TLAN_DUPLEX_FULL ) {
control |= MII_GC_DUPLEX;
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
}
if ( priv->speed == TLAN_SPEED_100 ) {
control |= MII_GC_SPEEDSEL;
@@ -2750,9 +2750,9 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa );
mode = an_adv & an_lpa & 0x03E0;
if ( mode & 0x0100 ) {
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
} else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) {
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
}
if ( ( ! ( mode & 0x0180 ) ) &&
@@ -2855,8 +2855,8 @@ void TLan_PhyMonitor( struct net_device *dev )
* TLan_MiiReadReg
*
* Returns:
- * 0 if ack received ok
- * 1 otherwise.
+ * false if ack received ok
+ * true if no ack received or other error
*
* Parms:
* dev The device structure containing
@@ -2875,17 +2875,17 @@ void TLan_PhyMonitor( struct net_device *dev )
*
**************************************************************/
-static int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
+static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
{
u8 nack;
u16 sio, tmp;
u32 i;
- int err;
+ bool err;
int minten;
TLanPrivateInfo *priv = netdev_priv(dev);
unsigned long flags = 0;
- err = FALSE;
+ err = false;
outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
@@ -2918,7 +2918,7 @@ static int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
}
tmp = 0xffff;
- err = TRUE;
+ err = true;
} else { /* ACK, so read data */
for (tmp = 0, i = 0x8000; i; i >>= 1) {
TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index 4b82f283e985..d13ff12d7500 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -31,9 +31,6 @@
*
****************************************************************/
-#define FALSE 0
-#define TRUE 1
-
#define TLAN_MIN_FRAME_SIZE 64
#define TLAN_MAX_FRAME_SIZE 1600
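
The tlan changes above retire the driver-private TRUE/FALSE macros in favour of the kernel-wide bool, true and false, and give TLan_MiiReadReg() a bool return type so its slightly unusual convention (false on success, true when no ACK is seen) is at least visible in the prototype. A tiny standalone sketch of that convention with a made-up helper:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper, not the tlan function: returns false on success,
 * true when the read could not be acknowledged. */
static bool mii_read_reg(unsigned int phy, unsigned int reg, unsigned short *val)
{
        if (phy > 31 || reg > 31)
                return true;    /* invalid address, no ACK possible */
        *val = 0xffff;          /* placeholder for the bit-banged read */
        return false;
}

int main(void)
{
        unsigned short val;

        if (mii_read_reg(0, 1, &val))
                fprintf(stderr, "MII read failed\n");
        else
                printf("register = 0x%04x\n", val);
        return 0;
}
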
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index cf552d1d9629..eff68e1d107b 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -117,7 +117,7 @@ MODULE_PARM_DESC(message_level, "3c359: Level of reported messages") ;
* will be stuck with 1555 lines of hex #'s in the code.
*/
-static struct pci_device_id xl_pci_tbl[] =
+static DEFINE_PCI_DEVICE_TABLE(xl_pci_tbl) =
{
{PCI_VENDOR_ID_3COM,PCI_DEVICE_ID_3COM_3C359, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* terminate list */
@@ -1408,7 +1408,7 @@ static void xl_set_rx_mode(struct net_device *dev)
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
+ for (i=0,dmi=dev->mc_list;i < netdev_mc_count(dev); i++,dmi = dmi->next) {
dev_mc_address[0] |= dmi->dmi_addr[2] ;
dev_mc_address[1] |= dmi->dmi_addr[3] ;
dev_mc_address[2] |= dmi->dmi_addr[4] ;
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
index b9db1b5a58a3..515f122777ab 100644
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -45,7 +45,7 @@ static char version[] __devinitdata =
#define ABYSS_IO_EXTENT 64
-static struct pci_device_id abyss_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(abyss_pci_tbl) = {
{ PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_MK2,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_TOKEN_RING << 8, 0x00ffffff, },
{ } /* Terminating entry */
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 66272f2a0758..1ce8f85a89aa 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -996,7 +996,7 @@ static void tok_set_multicast_list(struct net_device *dev)
if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return;
address[0] = address[1] = address[2] = address[3] = 0;
mclist = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
address[0] |= mclist->dmi_addr[2];
address[1] |= mclist->dmi_addr[3];
address[2] |= mclist->dmi_addr[4];
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index d6ccd59c7d07..26d84daf660b 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -146,7 +146,7 @@
static char version[] = "LanStreamer.c v0.4.0 03/08/01 - Mike Sullivan\n"
" v0.5.3 11/13/02 - Kent Yoder";
-static struct pci_device_id streamer_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(streamer_pci_tbl) = {
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR, PCI_ANY_ID, PCI_ANY_ID,},
{} /* terminating entry */
};
@@ -1303,7 +1303,7 @@ static void streamer_set_rx_mode(struct net_device *dev)
writel(streamer_priv->srb,streamer_mmio+LAPA);
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next)
+ for (i=0,dmi=dev->mc_list;i < netdev_mc_count(dev); i++,dmi = dmi->next)
{
dev_mc_address[0] |= dmi->dmi_addr[2] ;
dev_mc_address[1] |= dmi->dmi_addr[3] ;
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index df32025c5132..a242d125b34c 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -172,7 +172,7 @@ module_param_array(message_level, int, NULL, 0) ;
static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
module_param_array(network_monitor, int, NULL, 0);
-static struct pci_device_id olympic_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(olympic_pci_tbl) = {
{PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
{ } /* Terminating Entry */
};
@@ -1178,7 +1178,7 @@ static void olympic_set_rx_mode(struct net_device *dev)
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
+ for (i=0,dmi=dev->mc_list;i < netdev_mc_count(dev); i++,dmi = dmi->next) {
dev_mc_address[0] |= dmi->dmi_addr[2] ;
dev_mc_address[1] |= dmi->dmi_addr[3] ;
dev_mc_address[2] |= dmi->dmi_addr[4] ;
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index e3c42f5ac4a9..6b8868959b85 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -1214,7 +1214,7 @@ static void tms380tr_set_multicast_list(struct net_device *dev)
{
int i;
struct dev_mc_list *mclist = dev->mc_list;
- for (i=0; i< dev->mc_count; i++)
+ for (i=0; i< netdev_mc_count(dev); i++)
{
((char *)(&tp->ocpl.FunctAddr))[0] |=
mclist->dmi_addr[2];
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index f92fe86fdcae..d4c7c0c0a3d6 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -57,7 +57,7 @@ static struct card_info card_info_table[] = {
{ {0x03, 0x01}, "3Com Token Link Velocity"},
};
-static struct pci_device_id tmspci_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tmspci_pci_tbl) = {
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_TR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ PCI_VENDOR_ID_TCONRAD, PCI_DEVICE_ID_TCONRAD_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
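
Each token-ring driver above (3c359, abyss, lanstreamer, olympic, tmspci), like tg3, tlan and de2104x elsewhere in this patch, converts its ID table to DEFINE_PCI_DEVICE_TABLE(), which at this point in the kernel's history roughly expands to a const struct pci_device_id array with the era's __devinitconst annotation. A minimal sketch of how such a table is declared and hooked into a PCI driver of that generation; the vendor/device IDs, names and callbacks are invented for illustration:

#include <linux/module.h>
#include <linux/pci.h>

static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
        { PCI_DEVICE(0x1234, 0x5678) },         /* hypothetical IDs */
        { }                                     /* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);

static int __devinit example_probe(struct pci_dev *pdev,
                                   const struct pci_device_id *ent)
{
        /* A real driver would map BARs, register a netdev, etc. */
        return pci_enable_device(pdev);
}

static void __devexit example_remove(struct pci_dev *pdev)
{
        pci_disable_device(pdev);
}

static struct pci_driver example_driver = {
        .name           = "example",
        .id_table       = example_pci_tbl,
        .probe          = example_probe,
        .remove         = __devexit_p(example_remove),
};

static int __init example_init(void)
{
        return pci_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
        pci_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, ...) is what lets modprobe match the module against device IDs, and the name passed there must be the same array handed to .id_table.
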
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index a69c4a48bab9..f4b30c4826fb 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -1184,7 +1184,7 @@ static void tsi108_set_rx_mode(struct net_device *dev)
rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE);
- if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+ if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
int i;
struct dev_mc_list *mc = dev->mc_list;
rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
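
The tsi108 hunk above, like many in this patch, stops reading dev->mc_count directly and goes through netdev_mc_count()/netdev_mc_empty(), so the multicast list bookkeeping can change later without another sweep through every driver. A rough sketch of a set_rx_mode handler written against those helpers, still using the dev->mc_list walk these drivers use; the driver name, the 14-entry limit and the hash helper are illustrative:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Illustrative only: a driver-specific hash programmer would go here,
 * e.g. setting a filter bit derived from ether_crc_le(ETH_ALEN, addr). */
static void example_hash_mc_addr(struct net_device *dev, const u8 *addr)
{
}

static void example_set_rx_mode(struct net_device *dev)
{
        struct dev_mc_list *mclist;
        int i;

        if (dev->flags & IFF_PROMISC) {
                /* program the hardware to accept everything */
                return;
        }

        if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 14) {
                /* too many entries to filter precisely: accept all multicast */
                return;
        }

        if (netdev_mc_empty(dev)) {
                /* no multicast addresses configured: reject all multicast */
                return;
        }

        for (i = 0, mclist = dev->mc_list;
             mclist && i < netdev_mc_count(dev);
             i++, mclist = mclist->next)
                example_hash_mc_addr(dev, mclist->dmi_addr);
}
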
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
index 9f6742fad6ca..007d8e75666d 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/tulip/21142.c
@@ -43,8 +43,8 @@ void t21142_media_task(struct work_struct *work)
if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
csr12 |= 6;
if (tulip_debug > 2)
- printk(KERN_INFO"%s: 21143 negotiation status %8.8x, %s.\n",
- dev->name, csr12, medianame[dev->if_port]);
+ dev_info(&dev->dev, "21143 negotiation status %08x, %s\n",
+ csr12, medianame[dev->if_port]);
if (tulip_media_cap[dev->if_port] & MediaIsMII) {
if (tulip_check_duplex(dev) < 0) {
netif_carrier_off(dev);
@@ -56,23 +56,26 @@ void t21142_media_task(struct work_struct *work)
} else if (tp->nwayset) {
/* Don't screw up a negotiated session! */
if (tulip_debug > 1)
- printk(KERN_INFO"%s: Using NWay-set %s media, csr12 %8.8x.\n",
- dev->name, medianame[dev->if_port], csr12);
+ dev_info(&dev->dev,
+ "Using NWay-set %s media, csr12 %08x\n",
+ medianame[dev->if_port], csr12);
} else if (tp->medialock) {
;
} else if (dev->if_port == 3) {
if (csr12 & 2) { /* No 100mbps link beat, revert to 10mbps. */
if (tulip_debug > 1)
- printk(KERN_INFO"%s: No 21143 100baseTx link beat, %8.8x, "
- "trying NWay.\n", dev->name, csr12);
+ dev_info(&dev->dev,
+ "No 21143 100baseTx link beat, %08x, trying NWay\n",
+ csr12);
t21142_start_nway(dev);
next_tick = 3*HZ;
}
} else if ((csr12 & 0x7000) != 0x5000) {
/* Negotiation failed. Search media types. */
if (tulip_debug > 1)
- printk(KERN_INFO"%s: 21143 negotiation failed, status %8.8x.\n",
- dev->name, csr12);
+ dev_info(&dev->dev,
+ "21143 negotiation failed, status %08x\n",
+ csr12);
if (!(csr12 & 4)) { /* 10mbps link beat good. */
new_csr6 = 0x82420000;
dev->if_port = 0;
@@ -90,8 +93,8 @@ void t21142_media_task(struct work_struct *work)
iowrite32(1, ioaddr + CSR13);
}
if (tulip_debug > 1)
- printk(KERN_INFO"%s: Testing new 21143 media %s.\n",
- dev->name, medianame[dev->if_port]);
+ dev_info(&dev->dev, "Testing new 21143 media %s\n",
+ medianame[dev->if_port]);
if (new_csr6 != (tp->csr6 & ~0x00D5)) {
tp->csr6 &= 0x00D5;
tp->csr6 |= new_csr6;
@@ -119,8 +122,8 @@ void t21142_start_nway(struct net_device *dev)
tp->nway = tp->mediasense = 1;
tp->nwayset = tp->lpar = 0;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Restarting 21143 autonegotiation, csr14=%8.8x.\n",
- dev->name, csr14);
+ printk(KERN_DEBUG "%s: Restarting 21143 autonegotiation, csr14=%08x\n",
+ dev->name, csr14);
iowrite32(0x0001, ioaddr + CSR13);
udelay(100);
iowrite32(csr14, ioaddr + CSR14);
@@ -147,8 +150,9 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
csr12 |= 6;
if (tulip_debug > 1)
- printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, "
- "%8.8x.\n", dev->name, csr12, csr5, csr14);
+ dev_info(&dev->dev,
+ "21143 link status interrupt %08x, CSR5 %x, %08x\n",
+ csr12, csr5, csr14);
/* If NWay finished and we have a negotiated partner capability. */
if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) {
@@ -171,14 +175,15 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
if (tulip_debug > 1) {
if (tp->nwayset)
- printk(KERN_INFO "%s: Switching to %s based on link "
- "negotiation %4.4x & %4.4x = %4.4x.\n",
- dev->name, medianame[dev->if_port], tp->sym_advertise,
- tp->lpar, negotiated);
+ dev_info(&dev->dev,
+ "Switching to %s based on link negotiation %04x & %04x = %04x\n",
+ medianame[dev->if_port],
+ tp->sym_advertise, tp->lpar,
+ negotiated);
else
- printk(KERN_INFO "%s: Autonegotiation failed, using %s,"
- " link beat status %4.4x.\n",
- dev->name, medianame[dev->if_port], csr12);
+ dev_info(&dev->dev,
+ "Autonegotiation failed, using %s, link beat status %04x\n",
+ medianame[dev->if_port], csr12);
}
if (tp->mtable) {
@@ -201,14 +206,14 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
#if 0 /* Restart shouldn't be needed. */
iowrite32(tp->csr6 | RxOn, ioaddr + CSR6);
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Restarting Tx and Rx, CSR5 is %8.8x.\n",
- dev->name, ioread32(ioaddr + CSR5));
+ printk(KERN_DEBUG "%s: Restarting Tx and Rx, CSR5 is %08x\n",
+ dev->name, ioread32(ioaddr + CSR5));
#endif
tulip_start_rxtx(tp);
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 %8.8x.\n",
- dev->name, tp->csr6, ioread32(ioaddr + CSR6),
- ioread32(ioaddr + CSR12));
+ printk(KERN_DEBUG "%s: Setting CSR6 %08x/%x CSR12 %08x\n",
+ dev->name, tp->csr6, ioread32(ioaddr + CSR6),
+ ioread32(ioaddr + CSR12));
} else if ((tp->nwayset && (csr5 & 0x08000000) &&
(dev->if_port == 3 || dev->if_port == 5) &&
(csr12 & 2) == 2) ||
@@ -220,9 +225,9 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
add_timer(&tp->timer);
} else if (dev->if_port == 3 || dev->if_port == 5) {
if (tulip_debug > 1)
- printk(KERN_INFO"%s: 21143 %s link beat %s.\n",
- dev->name, medianame[dev->if_port],
- (csr12 & 2) ? "failed" : "good");
+ dev_info(&dev->dev, "21143 %s link beat %s\n",
+ medianame[dev->if_port],
+ (csr12 & 2) ? "failed" : "good");
if ((csr12 & 2) && ! tp->medialock) {
del_timer_sync(&tp->timer);
t21142_start_nway(dev);
@@ -232,21 +237,18 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
iowrite32(csr14 & ~0x080, ioaddr + CSR14);
} else if (dev->if_port == 0 || dev->if_port == 4) {
if ((csr12 & 4) == 0)
- printk(KERN_INFO"%s: 21143 10baseT link beat good.\n",
- dev->name);
+ dev_info(&dev->dev, "21143 10baseT link beat good\n");
} else if (!(csr12 & 4)) { /* 10mbps link beat good. */
if (tulip_debug)
- printk(KERN_INFO"%s: 21143 10mbps sensed media.\n",
- dev->name);
+ dev_info(&dev->dev, "21143 10mbps sensed media\n");
dev->if_port = 0;
} else if (tp->nwayset) {
if (tulip_debug)
- printk(KERN_INFO"%s: 21143 using NWay-set %s, csr6 %8.8x.\n",
- dev->name, medianame[dev->if_port], tp->csr6);
+ dev_info(&dev->dev, "21143 using NWay-set %s, csr6 %08x\n",
+ medianame[dev->if_port], tp->csr6);
} else { /* 100mbps link beat good. */
if (tulip_debug)
- printk(KERN_INFO"%s: 21143 100baseTx sensed media.\n",
- dev->name);
+ dev_info(&dev->dev, "21143 100baseTx sensed media\n");
dev->if_port = 3;
tp->csr6 = 0x838E0000 | (tp->csr6 & 0x20ff);
iowrite32(0x0003FF7F, ioaddr + CSR14);
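
The 21142.c hunks above swap printk(KERN_INFO "%s: ...", dev->name, ...) for dev_info(&dev->dev, ...), letting the driver core generate the message prefix from the struct device instead of every call site repeating the interface name, and normalizing %8.8x to %08x along the way. A small hedged sketch of the idiom; the helper and its arguments are made up:

#include <linux/device.h>
#include <linux/netdevice.h>

/* Report link state with device-prefixed messages instead of hand-rolled
 * "%s: ..." printk()s keyed on dev->name. */
static void example_report_link(struct net_device *dev, bool up, u32 speed)
{
        if (up)
                dev_info(&dev->dev, "link up, %u Mb/s\n", speed);
        else
                dev_warn(&dev->dev, "link down\n");
}
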
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index d4255d44cb75..a4cff23dcdf9 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -337,7 +337,7 @@ static void de21041_media_timer (unsigned long data);
static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
-static struct pci_device_id de_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
@@ -382,9 +382,9 @@ static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
/* Ingore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
if (netif_msg_rx_err(de))
- printk(KERN_WARNING "%s: Oversized Ethernet frame "
- "spanned multiple buffers, status %8.8x!\n",
- de->dev->name, status);
+ dev_warn(&de->dev->dev,
+ "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
+ status);
de->net_stats.rx_length_errors++;
}
} else if (status & RxError) {
@@ -487,7 +487,7 @@ rx_next:
}
if (!rx_work)
- printk(KERN_WARNING "%s: rx work limit reached\n", de->dev->name);
+ dev_warn(&de->dev->dev, "rx work limit reached\n");
de->rx_tail = rx_tail;
}
@@ -504,7 +504,8 @@ static irqreturn_t de_interrupt (int irq, void *dev_instance)
if (netif_msg_intr(de))
printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
- dev->name, status, dr32(MacMode), de->rx_tail, de->tx_head, de->tx_tail);
+ dev->name, status, dr32(MacMode),
+ de->rx_tail, de->tx_head, de->tx_tail);
dw32(MacStatus, status);
@@ -529,8 +530,9 @@ static irqreturn_t de_interrupt (int irq, void *dev_instance)
pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
- printk(KERN_ERR "%s: PCI bus error, status=%08x, PCI status=%04x\n",
- dev->name, status, pci_status);
+ dev_err(&de->dev->dev,
+ "PCI bus error, status=%08x, PCI status=%04x\n",
+ status, pci_status);
}
return IRQ_HANDLED;
@@ -582,7 +584,8 @@ static void de_tx (struct de_private *de)
de->net_stats.tx_packets++;
de->net_stats.tx_bytes += skb->len;
if (netif_msg_tx_done(de))
- printk(KERN_DEBUG "%s: tx done, slot %d\n", de->dev->name, tx_tail);
+ printk(KERN_DEBUG "%s: tx done, slot %d\n",
+ de->dev->name, tx_tail);
}
dev_kfree_skb_irq(skb);
}
@@ -674,7 +677,7 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
@@ -703,7 +706,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
eaddrs = (u16 *)mclist->dmi_addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
@@ -738,7 +741,7 @@ static void __de_set_rx_mode (struct net_device *dev)
goto out;
}
- if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter well -- accept all multicasts. */
macmode |= AcceptAllMulticast;
goto out;
@@ -746,7 +749,7 @@ static void __de_set_rx_mode (struct net_device *dev)
/* Note that only the low-address shortword of setup_frame is valid!
The values are doubled for big-endian architectures. */
- if (dev->mc_count > 14) /* Must use a multicast hash table. */
+ if (netdev_mc_count(dev) > 14) /* Must use a multicast hash table. */
build_setup_frame_hash (de->setup_frame, dev);
else
build_setup_frame_perfect (de->setup_frame, dev);
@@ -870,7 +873,7 @@ static void de_stop_rxtx (struct de_private *de)
udelay(100);
}
- printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name);
+ dev_warn(&de->dev->dev, "timeout expired stopping DMA\n");
}
static inline void de_start_rxtx (struct de_private *de)
@@ -905,8 +908,8 @@ static void de_link_up(struct de_private *de)
if (!netif_carrier_ok(de->dev)) {
netif_carrier_on(de->dev);
if (netif_msg_link(de))
- printk(KERN_INFO "%s: link up, media %s\n",
- de->dev->name, media_name[de->media_type]);
+ dev_info(&de->dev->dev, "link up, media %s\n",
+ media_name[de->media_type]);
}
}
@@ -915,7 +918,7 @@ static void de_link_down(struct de_private *de)
if (netif_carrier_ok(de->dev)) {
netif_carrier_off(de->dev);
if (netif_msg_link(de))
- printk(KERN_INFO "%s: link down\n", de->dev->name);
+ dev_info(&de->dev->dev, "link down\n");
}
}
@@ -925,7 +928,8 @@ static void de_set_media (struct de_private *de)
u32 macmode = dr32(MacMode);
if (de_is_running(de))
- printk(KERN_WARNING "%s: chip is running while changing media!\n", de->dev->name);
+ dev_warn(&de->dev->dev,
+ "chip is running while changing media!\n");
if (de->de21040)
dw32(CSR11, FULL_DUPLEX_MAGIC);
@@ -945,15 +949,15 @@ static void de_set_media (struct de_private *de)
macmode &= ~FullDuplex;
if (netif_msg_link(de)) {
- printk(KERN_INFO
- "%s: set link %s\n"
- "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n"
- "%s: set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
- de->dev->name, media_name[media],
- de->dev->name, dr32(MacMode), dr32(SIAStatus),
- dr32(CSR13), dr32(CSR14), dr32(CSR15),
- de->dev->name, macmode, de->media[media].csr13,
- de->media[media].csr14, de->media[media].csr15);
+ dev_info(&de->dev->dev, "set link %s\n", media_name[media]);
+ dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
+ dr32(MacMode), dr32(SIAStatus),
+ dr32(CSR13), dr32(CSR14), dr32(CSR15));
+
+ dev_info(&de->dev->dev,
+ "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
+ macmode, de->media[media].csr13,
+ de->media[media].csr14, de->media[media].csr15);
}
if (macmode != dr32(MacMode))
dw32(MacMode, macmode);
@@ -992,9 +996,8 @@ static void de21040_media_timer (unsigned long data)
de_link_up(de);
else
if (netif_msg_timer(de))
- printk(KERN_INFO "%s: %s link ok, status %x\n",
- dev->name, media_name[de->media_type],
- status);
+ dev_info(&dev->dev, "%s link ok, status %x\n",
+ media_name[de->media_type], status);
return;
}
@@ -1022,8 +1025,8 @@ no_link_yet:
add_timer(&de->media_timer);
if (netif_msg_timer(de))
- printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
- dev->name, media_name[de->media_type], status);
+ dev_info(&dev->dev, "no link, trying media %s, status %x\n",
+ media_name[de->media_type], status);
}
static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
@@ -1079,9 +1082,10 @@ static void de21041_media_timer (unsigned long data)
de_link_up(de);
else
if (netif_msg_timer(de))
- printk(KERN_INFO "%s: %s link ok, mode %x status %x\n",
- dev->name, media_name[de->media_type],
- dr32(MacMode), status);
+ dev_info(&dev->dev,
+ "%s link ok, mode %x status %x\n",
+ media_name[de->media_type],
+ dr32(MacMode), status);
return;
}
@@ -1150,8 +1154,8 @@ no_link_yet:
add_timer(&de->media_timer);
if (netif_msg_timer(de))
- printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
- dev->name, media_name[de->media_type], status);
+ dev_info(&dev->dev, "no link, trying media %s, status %x\n",
+ media_name[de->media_type], status);
}
static void de_media_interrupt (struct de_private *de, u32 status)
@@ -1378,8 +1382,7 @@ static int de_open (struct net_device *dev)
rc = de_alloc_rings(de);
if (rc) {
- printk(KERN_ERR "%s: ring allocation failure, err=%d\n",
- dev->name, rc);
+ dev_err(&dev->dev, "ring allocation failure, err=%d\n", rc);
return rc;
}
@@ -1387,15 +1390,14 @@ static int de_open (struct net_device *dev)
rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
if (rc) {
- printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n",
- dev->name, dev->irq, rc);
+ dev_err(&dev->dev, "IRQ %d request failure, err=%d\n",
+ dev->irq, rc);
goto err_out_free;
}
rc = de_init_hw(de);
if (rc) {
- printk(KERN_ERR "%s: h/w init failure, err=%d\n",
- dev->name, rc);
+ dev_err(&dev->dev, "h/w init failure, err=%d\n", rc);
goto err_out_free_irq;
}
@@ -1666,8 +1668,8 @@ static int de_nway_reset(struct net_device *dev)
status = dr32(SIAStatus);
dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
if (netif_msg_link(de))
- printk(KERN_INFO "%s: link nway restart, status %x,%x\n",
- de->dev->name, status, dr32(SIAStatus));
+ dev_info(&de->dev->dev, "link nway restart, status %x,%x\n",
+ status, dr32(SIAStatus));
return 0;
}
@@ -1711,7 +1713,7 @@ static void __devinit de21040_get_mac_address (struct de_private *de)
de->dev->dev_addr[i] = value;
udelay(1);
if (boguscnt <= 0)
- printk(KERN_WARNING PFX "timeout reading 21040 MAC address byte %u\n", i);
+ pr_warning(PFX "timeout reading 21040 MAC address byte %u\n", i);
}
}
@@ -1830,9 +1832,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
}
if (netif_msg_probe(de))
- printk(KERN_INFO "de%d: SROM leaf offset %u, default media %s\n",
- de->board_idx, ofs,
- media_name[de->media_type]);
+ pr_info("de%d: SROM leaf offset %u, default media %s\n",
+ de->board_idx, ofs, media_name[de->media_type]);
/* init SIA register values to defaults */
for (i = 0; i < DE_MAX_MEDIA; i++) {
@@ -1879,9 +1880,9 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
de->media[idx].type = idx;
if (netif_msg_probe(de))
- printk(KERN_INFO "de%d: media block #%u: %s",
- de->board_idx, i,
- media_name[de->media[idx].type]);
+ pr_info("de%d: media block #%u: %s",
+ de->board_idx, i,
+ media_name[de->media[idx].type]);
bufp += sizeof (ib->opts);
@@ -1893,13 +1894,13 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
sizeof(ib->csr15);
if (netif_msg_probe(de))
- printk(" (%x,%x,%x)\n",
- de->media[idx].csr13,
- de->media[idx].csr14,
- de->media[idx].csr15);
+ pr_cont(" (%x,%x,%x)\n",
+ de->media[idx].csr13,
+ de->media[idx].csr14,
+ de->media[idx].csr15);
} else if (netif_msg_probe(de))
- printk("\n");
+ pr_cont("\n");
if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
break;
@@ -2005,7 +2006,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
/* check for invalid IRQ value */
if (pdev->irq < 2) {
rc = -EIO;
- printk(KERN_ERR PFX "invalid irq (%d) for pci dev %s\n",
+ pr_err(PFX "invalid irq (%d) for pci dev %s\n",
pdev->irq, pci_name(pdev));
goto err_out_res;
}
@@ -2016,14 +2017,14 @@ static int __devinit de_init_one (struct pci_dev *pdev,
pciaddr = pci_resource_start(pdev, 1);
if (!pciaddr) {
rc = -EIO;
- printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
- pci_name(pdev));
+ pr_err(PFX "no MMIO resource for pci dev %s\n", pci_name(pdev));
goto err_out_res;
}
if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
rc = -EIO;
- printk(KERN_ERR PFX "MMIO resource (%llx) too small on pci dev %s\n",
- (unsigned long long)pci_resource_len(pdev, 1), pci_name(pdev));
+ pr_err(PFX "MMIO resource (%llx) too small on pci dev %s\n",
+ (unsigned long long)pci_resource_len(pdev, 1),
+ pci_name(pdev));
goto err_out_res;
}
@@ -2031,9 +2032,9 @@ static int __devinit de_init_one (struct pci_dev *pdev,
regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
if (!regs) {
rc = -EIO;
- printk(KERN_ERR PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
- (unsigned long long)pci_resource_len(pdev, 1),
- pciaddr, pci_name(pdev));
+ pr_err(PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
+ (unsigned long long)pci_resource_len(pdev, 1),
+ pciaddr, pci_name(pdev));
goto err_out_res;
}
dev->base_addr = (unsigned long) regs;
@@ -2044,8 +2045,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
/* make sure hardware is not running */
rc = de_reset_mac(de);
if (rc) {
- printk(KERN_ERR PFX "Cannot reset MAC, pci dev %s\n",
- pci_name(pdev));
+ pr_err(PFX "Cannot reset MAC, pci dev %s\n", pci_name(pdev));
goto err_out_iomap;
}
@@ -2065,12 +2065,11 @@ static int __devinit de_init_one (struct pci_dev *pdev,
goto err_out_iomap;
/* print info about board and interface just registered */
- printk (KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n",
- dev->name,
- de->de21040 ? "21040" : "21041",
- dev->base_addr,
- dev->dev_addr,
- dev->irq);
+ dev_info(&dev->dev, "%s at 0x%lx, %pM, IRQ %d\n",
+ de->de21040 ? "21040" : "21041",
+ dev->base_addr,
+ dev->dev_addr,
+ dev->irq);
pci_set_drvdata(pdev, dev);
@@ -2158,8 +2157,7 @@ static int de_resume (struct pci_dev *pdev)
if (!netif_running(dev))
goto out_attach;
if ((retval = pci_enable_device(pdev))) {
- printk (KERN_ERR "%s: pci_enable_device failed in resume\n",
- dev->name);
+ dev_err(&dev->dev, "pci_enable_device failed in resume\n");
goto out;
}
de_init_hw(de);
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index a8349b7200b5..0b6a9731091c 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1963,10 +1963,10 @@ SetMulticastFilter(struct net_device *dev)
omr &= ~(OMR_PR | OMR_PM);
pa = build_setup_frame(dev, ALL); /* Build the basic frame */
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) {
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
omr |= OMR_PM; /* Pass all multicasts */
} else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
- for (i=0;i<dev->mc_count;i++) { /* for each address in the list */
+ for (i = 0; i < netdev_mc_count(dev) ;i++) {
addrs=dmi->dmi_addr;
dmi=dmi->next;
if ((*addrs & 0x01) == 1) { /* multicast address? */
@@ -1984,7 +1984,7 @@ SetMulticastFilter(struct net_device *dev)
}
}
} else { /* Perfect filtering */
- for (j=0; j<dev->mc_count; j++) {
+ for (j=0; j<netdev_mc_count(dev); j++) {
addrs=dmi->dmi_addr;
dmi=dmi->next;
for (i=0; i<ETH_ALEN; i++) {
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 6f44ebf58910..534afbdb9c91 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -61,6 +61,8 @@
Test and make sure PCI latency is now correct for all cases.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "dmfe"
#define DRV_VERSION "1.36.4"
#define DRV_RELDATE "2002-01-17"
@@ -149,16 +151,17 @@
#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */
#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */
-#define DMFE_DBUG(dbug_now, msg, value) \
- do { \
- if (dmfe_debug || (dbug_now)) \
- printk(KERN_ERR DRV_NAME ": %s %lx\n",\
- (msg), (long) (value)); \
+#define DMFE_DBUG(dbug_now, msg, value) \
+ do { \
+ if (dmfe_debug || (dbug_now)) \
+ pr_err("%s %lx\n", \
+ (msg), (long) (value)); \
} while (0)
-#define SHOW_MEDIA_TYPE(mode) \
- printk (KERN_INFO DRV_NAME ": Change Speed to %sMhz %s duplex\n" , \
- (mode & 1) ? "100":"10", (mode & 4) ? "full":"half");
+#define SHOW_MEDIA_TYPE(mode) \
+ pr_info("Change Speed to %sMhz %s duplex\n" , \
+ (mode & 1) ? "100":"10", \
+ (mode & 4) ? "full":"half");
/* CR9 definition: SROM/MII */
@@ -391,8 +394,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
struct device_node *dp = pci_device_to_OF_node(pdev);
if (dp && of_get_property(dp, "local-mac-address", NULL)) {
- printk(KERN_INFO DRV_NAME
- ": skipping on-board DM910x (use tulip)\n");
+ pr_info("skipping on-board DM910x (use tulip)\n");
return -ENODEV;
}
}
@@ -405,8 +407,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- printk(KERN_WARNING DRV_NAME
- ": 32-bit PCI DMA not available.\n");
+ pr_warning("32-bit PCI DMA not available\n");
err = -ENODEV;
goto err_out_free;
}
@@ -417,13 +418,13 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
goto err_out_free;
if (!pci_resource_start(pdev, 0)) {
- printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
+ pr_err("I/O base is zero\n");
err = -ENODEV;
goto err_out_disable;
}
if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
- printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
+ pr_err("Allocated I/O size too small\n");
err = -ENODEV;
goto err_out_disable;
}
@@ -438,7 +439,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
#endif
if (pci_request_regions(pdev, DRV_NAME)) {
- printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
+ pr_err("Failed to request PCI regions\n");
err = -ENODEV;
goto err_out_disable;
}
@@ -497,12 +498,9 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
if (err)
goto err_out_free_buf;
- printk(KERN_INFO "%s: Davicom DM%04lx at pci%s, %pM, irq %d.\n",
- dev->name,
- ent->driver_data >> 16,
- pci_name(pdev),
- dev->dev_addr,
- dev->irq);
+ dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
+ ent->driver_data >> 16,
+ pci_name(pdev), dev->dev_addr, dev->irq);
pci_set_master(pdev);
@@ -660,9 +658,9 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
/* Send setup frame */
if (db->chip_id == PCI_DM9132_ID)
- dm9132_id_table(dev, dev->mc_count); /* DM9132 */
+ dm9132_id_table(dev, netdev_mc_count(dev)); /* DM9132 */
else
- send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
+ send_filter_frame(dev, netdev_mc_count(dev)); /* DM9102/DM9102A */
/* Init CR7, interrupt active bit */
db->cr7_data = CR7_DEFAULT;
@@ -696,7 +694,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
/* Too large packet check */
if (skb->len > MAX_PACKET_SIZE) {
- printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
+ pr_err("big packet = %d\n", (u16)skb->len);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -706,8 +704,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
/* No Tx resource check, it never happen nromally */
if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
spin_unlock_irqrestore(&db->lock, flags);
- printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n",
- db->tx_queue_cnt);
+ pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
return NETDEV_TX_BUSY;
}
@@ -779,12 +776,11 @@ static int dmfe_stop(struct DEVICE *dev)
#if 0
/* show statistic counter */
- printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx"
- " LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
- db->tx_fifo_underrun, db->tx_excessive_collision,
- db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
- db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
- db->reset_fatal, db->reset_TXtimeout);
+ printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
+ db->tx_fifo_underrun, db->tx_excessive_collision,
+ db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
+ db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
+ db->reset_fatal, db->reset_TXtimeout);
#endif
return 0;
@@ -885,7 +881,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
txptr = db->tx_remove_ptr;
while(db->tx_packet_cnt) {
tdes0 = le32_to_cpu(txptr->tdes0);
- /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
+ pr_debug("tdes0=%x\n", tdes0);
if (tdes0 & 0x80000000)
break;
@@ -895,7 +891,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
/* Transmit statistic counter */
if ( tdes0 != 0x7fffffff ) {
- /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
+ pr_debug("tdes0=%x\n", tdes0);
dev->stats.collisions += (tdes0 >> 3) & 0xf;
dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
if (tdes0 & TDES0_ERR_MASK) {
@@ -992,7 +988,7 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
/* error summary bit check */
if (rdes0 & 0x8000) {
/* This is a error packet */
- //printk(DRV_NAME ": rdes0: %lx\n", rdes0);
+ pr_debug("rdes0: %x\n", rdes0);
dev->stats.rx_errors++;
if (rdes0 & 1)
dev->stats.rx_fifo_errors++;
@@ -1056,6 +1052,7 @@ static void dmfe_set_filter_mode(struct DEVICE * dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
unsigned long flags;
+ int mc_count = netdev_mc_count(dev);
DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
spin_lock_irqsave(&db->lock, flags);
@@ -1068,19 +1065,19 @@ static void dmfe_set_filter_mode(struct DEVICE * dev)
return;
}
- if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) {
- DMFE_DBUG(0, "Pass all multicast address", dev->mc_count);
+ if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
+ DMFE_DBUG(0, "Pass all multicast address", mc_count);
db->cr6_data &= ~(CR6_PM | CR6_PBF);
db->cr6_data |= CR6_PAM;
spin_unlock_irqrestore(&db->lock, flags);
return;
}
- DMFE_DBUG(0, "Set multicast address", dev->mc_count);
+ DMFE_DBUG(0, "Set multicast address", mc_count);
if (db->chip_id == PCI_DM9132_ID)
- dm9132_id_table(dev, dev->mc_count); /* DM9132 */
+ dm9132_id_table(dev, mc_count); /* DM9132 */
else
- send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
+ send_filter_frame(dev, mc_count); /* DM9102/DM9102A */
spin_unlock_irqrestore(&db->lock, flags);
}
@@ -1191,8 +1188,7 @@ static void dmfe_timer(unsigned long data)
if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
db->reset_TXtimeout++;
db->wait_reset = 1;
- printk(KERN_WARNING "%s: Tx timeout - resetting\n",
- dev->name);
+ dev_warn(&dev->dev, "Tx timeout - resetting\n");
}
}
@@ -1646,7 +1642,7 @@ static u8 dmfe_sense_speed(struct dmfe_board_info * db)
else /* DM9102/DM9102A */
phy_mode = phy_read(db->ioaddr,
db->phy_addr, 17, db->chip_id) & 0xf000;
- /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
+ pr_debug("Phy_mode %x\n", phy_mode);
switch (phy_mode) {
case 0x1000: db->op_mode = DMFE_10MHF; break;
case 0x2000: db->op_mode = DMFE_10MFD; break;
@@ -2089,7 +2085,7 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
-static struct pci_device_id dmfe_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(dmfe_pci_tbl) = {
{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
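
The dmfe hunks above can drop the hand-built DRV_NAME ": " prefixes because pr_err()/pr_debug() prepend a per-file prefix through pr_fmt() (the uli526x hunk further down adds exactly that define). A minimal, runnable sketch of the mechanism, with userspace stand-ins for the real macros from the kernel headers:

#include <stdio.h>

#define KBUILD_MODNAME "dmfe"                 /* normally provided by Kbuild */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt   /* per-file message prefix */

/* Stand-ins for the kernel's pr_err()/pr_debug(), enough to show how the
 * prefix is glued onto the format string at compile time. */
#define pr_err(fmt, ...)   printf("<3>" pr_fmt(fmt), ##__VA_ARGS__)
#define pr_debug(fmt, ...) printf("<7>" pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	long tx_queue_cnt = 12;

	pr_err("No Tx resource %ld\n", tx_queue_cnt);  /* prints "<3>dmfe: No Tx resource 12" */
	pr_debug("tdes0=%x\n", 0x7fffffffu);
	return 0;
}
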
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index 889f57aae89b..93f4e8309f81 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -161,15 +161,15 @@ void __devinit tulip_parse_eeprom(struct net_device *dev)
if (ee_data[0] == 0xff) {
if (last_mediatable) {
controller_index++;
- printk(KERN_INFO "%s: Controller %d of multiport board.\n",
- dev->name, controller_index);
+ dev_info(&dev->dev,
+ "Controller %d of multiport board\n",
+ controller_index);
tp->mtable = last_mediatable;
ee_data = last_ee_data;
goto subsequent_board;
} else
- printk(KERN_INFO "%s: Missing EEPROM, this interface may "
- "not work correctly!\n",
- dev->name);
+ dev_info(&dev->dev,
+ "Missing EEPROM, this interface may not work correctly!\n");
return;
}
/* Do a fix-up based on the vendor half of the station address prefix. */
@@ -181,16 +181,15 @@ void __devinit tulip_parse_eeprom(struct net_device *dev)
i++; /* An Accton EN1207, not an outlaw Maxtech. */
memcpy(ee_data + 26, eeprom_fixups[i].newtable,
sizeof(eeprom_fixups[i].newtable));
- printk(KERN_INFO "%s: Old format EEPROM on '%s' board. Using"
- " substitute media control info.\n",
- dev->name, eeprom_fixups[i].name);
+ dev_info(&dev->dev,
+ "Old format EEPROM on '%s' board. Using substitute media control info\n",
+ eeprom_fixups[i].name);
break;
}
}
if (eeprom_fixups[i].name == NULL) { /* No fixup found. */
- printk(KERN_INFO "%s: Old style EEPROM with no media selection "
- "information.\n",
- dev->name);
+ dev_info(&dev->dev,
+ "Old style EEPROM with no media selection information\n");
return;
}
}
@@ -218,7 +217,8 @@ subsequent_board:
/* there is no phy information, don't even try to build mtable */
if (count == 0) {
if (tulip_debug > 0)
- printk(KERN_WARNING "%s: no phy info, aborting mtable build\n", dev->name);
+ dev_warn(&dev->dev,
+ "no phy info, aborting mtable build\n");
return;
}
@@ -234,8 +234,8 @@ subsequent_board:
mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0;
mtable->csr15dir = mtable->csr15val = 0;
- printk(KERN_INFO "%s: EEPROM default media type %s.\n", dev->name,
- media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
+ dev_info(&dev->dev, "EEPROM default media type %s\n",
+ media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
for (i = 0; i < count; i++) {
struct medialeaf *leaf = &mtable->mleaf[i];
@@ -298,16 +298,17 @@ subsequent_board:
}
if (tulip_debug > 1 && leaf->media == 11) {
unsigned char *bp = leaf->leafdata;
- printk(KERN_INFO "%s: MII interface PHY %d, setup/reset "
- "sequences %d/%d long, capabilities %2.2x %2.2x.\n",
- dev->name, bp[0], bp[1], bp[2 + bp[1]*2],
- bp[5 + bp[2 + bp[1]*2]*2], bp[4 + bp[2 + bp[1]*2]*2]);
+ dev_info(&dev->dev,
+ "MII interface PHY %d, setup/reset sequences %d/%d long, capabilities %02x %02x\n",
+ bp[0], bp[1], bp[2 + bp[1]*2],
+ bp[5 + bp[2 + bp[1]*2]*2],
+ bp[4 + bp[2 + bp[1]*2]*2]);
}
- printk(KERN_INFO "%s: Index #%d - Media %s (#%d) described "
- "by a %s (%d) block.\n",
- dev->name, i, medianame[leaf->media & 15], leaf->media,
- leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>",
- leaf->type);
+ dev_info(&dev->dev,
+ "Index #%d - Media %s (#%d) described by a %s (%d) block\n",
+ i, medianame[leaf->media & 15], leaf->media,
+ leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>",
+ leaf->type);
}
if (new_advertise)
tp->sym_advertise = new_advertise;
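
Throughout the eeprom.c hunks, dev_info()/dev_warn() replace printk() plus an explicit "%s:"/dev->name pair, because the dev_* helpers derive the message prefix from the struct device passed as first argument. A runnable sketch with simplified userspace stand-ins (the real helpers also include the bus and driver name in the prefix):

#include <stdio.h>

/* Userspace stand-ins, only to show why the explicit dev->name argument
 * drops out of the format strings. */
struct device { const char *name; };

#define dev_info(dev, fmt, ...) printf("%s: " fmt, (dev)->name, ##__VA_ARGS__)
#define dev_warn(dev, fmt, ...) printf("%s: " fmt, (dev)->name, ##__VA_ARGS__)

int main(void)
{
	struct device eth0 = { .name = "eth0" };

	dev_info(&eth0, "Controller %d of multiport board\n", 1);
	dev_warn(&eth0, "no phy info, aborting mtable build\n");
	return 0;
}
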
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 2e8e8ee893c7..1faf7a4d7202 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -125,12 +125,12 @@ int tulip_poll(struct napi_struct *napi, int budget)
#endif
if (tulip_debug > 4)
- printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
- tp->rx_ring[entry].status);
+ printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
+ entry, tp->rx_ring[entry].status);
do {
if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
- printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
+ printk(KERN_DEBUG " In tulip_poll(), hardware disappeared\n");
break;
}
/* Acknowledge current RX interrupt sources. */
@@ -146,7 +146,7 @@ int tulip_poll(struct napi_struct *napi, int budget)
break;
if (tulip_debug > 5)
- printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
+ printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
dev->name, entry, status);
if (++work_done >= budget)
@@ -177,15 +177,15 @@ int tulip_poll(struct napi_struct *napi, int budget)
/* Ignore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: Oversized Ethernet frame "
- "spanned multiple buffers, status %8.8x!\n",
- dev->name, status);
+ dev_warn(&dev->dev,
+ "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
+ status);
tp->stats.rx_length_errors++;
}
} else {
/* There was a fatal error. */
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
+ printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
dev->name, status);
tp->stats.rx_errors++; /* end of a packet.*/
if (pkt_len > 1518 ||
@@ -226,12 +226,11 @@ int tulip_poll(struct napi_struct *napi, int budget)
#ifndef final_version
if (tp->rx_buffers[entry].mapping !=
le32_to_cpu(tp->rx_ring[entry].buffer1)) {
- printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
- "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
- dev->name,
- le32_to_cpu(tp->rx_ring[entry].buffer1),
- (unsigned long long)tp->rx_buffers[entry].mapping,
- skb->head, temp);
+ dev_err(&dev->dev,
+ "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
+ le32_to_cpu(tp->rx_ring[entry].buffer1),
+ (unsigned long long)tp->rx_buffers[entry].mapping,
+ skb->head, temp);
}
#endif
@@ -365,16 +364,16 @@ static int tulip_rx(struct net_device *dev)
int received = 0;
if (tulip_debug > 4)
- printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
- tp->rx_ring[entry].status);
+ printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
+ entry, tp->rx_ring[entry].status);
/* If we own the next entry, it is a new packet. Send it up. */
while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
s32 status = le32_to_cpu(tp->rx_ring[entry].status);
short pkt_len;
if (tulip_debug > 5)
- printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
- dev->name, entry, status);
+ printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
+ dev->name, entry, status);
if (--rx_work_limit < 0)
break;
@@ -402,16 +401,16 @@ static int tulip_rx(struct net_device *dev)
/* Ignore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: Oversized Ethernet frame "
- "spanned multiple buffers, status %8.8x!\n",
- dev->name, status);
+ dev_warn(&dev->dev,
+ "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
+ status);
tp->stats.rx_length_errors++;
}
} else {
/* There was a fatal error. */
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
- dev->name, status);
+ printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
+ dev->name, status);
tp->stats.rx_errors++; /* end of a packet.*/
if (pkt_len > 1518 ||
(status & RxDescRunt))
@@ -450,12 +449,11 @@ static int tulip_rx(struct net_device *dev)
#ifndef final_version
if (tp->rx_buffers[entry].mapping !=
le32_to_cpu(tp->rx_ring[entry].buffer1)) {
- printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
- "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
- dev->name,
- le32_to_cpu(tp->rx_ring[entry].buffer1),
- (long long)tp->rx_buffers[entry].mapping,
- skb->head, temp);
+ dev_err(&dev->dev,
+ "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %Lx %p / %p\n",
+ le32_to_cpu(tp->rx_ring[entry].buffer1),
+ (long long)tp->rx_buffers[entry].mapping,
+ skb->head, temp);
}
#endif
@@ -569,7 +567,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
#endif /* CONFIG_TULIP_NAPI */
if (tulip_debug > 4)
- printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
+ printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x\n",
dev->name, csr5, ioread32(ioaddr + CSR5));
@@ -601,8 +599,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
/* There was an major error, log it. */
#ifndef final_version
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
- dev->name, status);
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
+ dev->name, status);
#endif
tp->stats.tx_errors++;
if (status & 0x4104) tp->stats.tx_aborted_errors++;
@@ -631,8 +629,9 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
#ifndef final_version
if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
- printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
- dev->name, dirty_tx, tp->cur_tx);
+ dev_err(&dev->dev,
+ "Out-of-sync dirty pointer, %d vs. %d\n",
+ dirty_tx, tp->cur_tx);
dirty_tx += TX_RING_SIZE;
}
#endif
@@ -643,9 +642,10 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
tp->dirty_tx = dirty_tx;
if (csr5 & TxDied) {
if (tulip_debug > 2)
- printk(KERN_WARNING "%s: The transmitter stopped."
- " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
- dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
+ dev_warn(&dev->dev,
+ "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
+ csr5, ioread32(ioaddr + CSR6),
+ tp->csr6);
tulip_restart_rxtx(tp);
}
spin_unlock(&tp->lock);
@@ -696,8 +696,9 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
* to the 21142/3 docs that is).
* -- rmk
*/
- printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
- dev->name, tp->nir, error);
+ dev_err(&dev->dev,
+ "(%lu) System Error occurred (%d)\n",
+ tp->nir, error);
}
/* Clear all error sources, included undocumented ones! */
iowrite32(0x0800f7ba, ioaddr + CSR5);
@@ -706,16 +707,17 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
if (csr5 & TimerInt) {
if (tulip_debug > 2)
- printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
- dev->name, csr5);
+ dev_err(&dev->dev,
+ "Re-enabling interrupts, %08x\n",
+ csr5);
iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
tp->ttimer = 0;
oi++;
}
if (tx > maxtx || rx > maxrx || oi > maxoi) {
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: Too much work during an interrupt, "
- "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);
+ dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
+ csr5, tp->nir, tx, rx, oi);
/* Acknowledge all interrupt sources. */
iowrite32(0x8001ffff, ioaddr + CSR5);
@@ -764,14 +766,18 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
entry = tp->dirty_rx % RX_RING_SIZE;
if (tp->rx_buffers[entry].skb == NULL) {
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
+ dev_warn(&dev->dev,
+ "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
+ tp->nir, tp->cur_rx, tp->ttimer, rx);
if (tp->chip_id == LC82C168) {
iowrite32(0x00, ioaddr + CSR7);
mod_timer(&tp->timer, RUN_AT(HZ/50));
} else {
if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
+ dev_warn(&dev->dev,
+ "in rx suspend mode: (%lu) set timer\n",
+ tp->nir);
iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
ioaddr + CSR7);
iowrite32(TimerInt, ioaddr + CSR5);
@@ -787,8 +793,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
}
if (tulip_debug > 4)
- printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
- dev->name, ioread32(ioaddr + CSR5));
+ printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#04x\n",
+ dev->name, ioread32(ioaddr + CSR5));
return IRQ_HANDLED;
}
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index d8fda83705bf..68b170ae4d15 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -182,9 +182,8 @@ void tulip_select_media(struct net_device *dev, int startup)
switch (mleaf->type) {
case 0: /* 21140 non-MII xcvr. */
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver"
- " with control setting %2.2x.\n",
- dev->name, p[1]);
+ printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver with control setting %02x\n",
+ dev->name, p[1]);
dev->if_port = p[0];
if (startup)
iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12);
@@ -205,15 +204,15 @@ void tulip_select_media(struct net_device *dev, int startup)
struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
unsigned char *rst = rleaf->leafdata;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
- dev->name);
+ printk(KERN_DEBUG "%s: Resetting the transceiver\n",
+ dev->name);
for (i = 0; i < rst[0]; i++)
iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
}
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control "
- "%4.4x/%4.4x.\n",
- dev->name, medianame[dev->if_port], setup[0], setup[1]);
+ printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control %04x/%04x\n",
+ dev->name, medianame[dev->if_port],
+ setup[0], setup[1]);
if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. */
csr13val = setup[0];
csr14val = setup[1];
@@ -240,8 +239,8 @@ void tulip_select_media(struct net_device *dev, int startup)
if (startup) iowrite32(csr13val, ioaddr + CSR13);
}
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Setting CSR15 to %8.8x/%8.8x.\n",
- dev->name, csr15dir, csr15val);
+ printk(KERN_DEBUG "%s: Setting CSR15 to %08x/%08x\n",
+ dev->name, csr15dir, csr15val);
if (mleaf->type == 4)
new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18);
else
@@ -317,8 +316,9 @@ void tulip_select_media(struct net_device *dev, int startup)
if (tp->mii_advertise == 0)
tp->mii_advertise = tp->advertising[phy_num];
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Advertising %4.4x on MII %d.\n",
- dev->name, tp->mii_advertise, tp->phys[phy_num]);
+ printk(KERN_DEBUG "%s: Advertising %04x on MII %d\n",
+ dev->name, tp->mii_advertise,
+ tp->phys[phy_num]);
tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise);
}
break;
@@ -335,8 +335,8 @@ void tulip_select_media(struct net_device *dev, int startup)
struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
unsigned char *rst = rleaf->leafdata;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
- dev->name);
+ printk(KERN_DEBUG "%s: Resetting the transceiver\n",
+ dev->name);
for (i = 0; i < rst[0]; i++)
iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
}
@@ -344,20 +344,20 @@ void tulip_select_media(struct net_device *dev, int startup)
break;
}
default:
- printk(KERN_DEBUG "%s: Invalid media table selection %d.\n",
- dev->name, mleaf->type);
+ printk(KERN_DEBUG "%s: Invalid media table selection %d\n",
+ dev->name, mleaf->type);
new_csr6 = 0x020E0000;
}
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %2.2x.\n",
- dev->name, medianame[dev->if_port],
+ printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %02x\n",
+ dev->name, medianame[dev->if_port],
ioread32(ioaddr + CSR12) & 0xff);
} else if (tp->chip_id == LC82C168) {
if (startup && ! tp->medialock)
dev->if_port = tp->mii_cnt ? 11 : 0;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s.\n",
- dev->name, ioread32(ioaddr + 0xB8), medianame[dev->if_port]);
+ printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s\n",
+ dev->name, ioread32(ioaddr + 0xB8), medianame[dev->if_port]);
if (tp->mii_cnt) {
new_csr6 = 0x810C0000;
iowrite32(0x0001, ioaddr + CSR15);
@@ -388,10 +388,9 @@ void tulip_select_media(struct net_device *dev, int startup)
} else
new_csr6 = 0x03860000;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: No media description table, assuming "
- "%s transceiver, CSR12 %2.2x.\n",
- dev->name, medianame[dev->if_port],
- ioread32(ioaddr + CSR12));
+ printk(KERN_DEBUG "%s: No media description table, assuming %s transceiver, CSR12 %02x\n",
+ dev->name, medianame[dev->if_port],
+ ioread32(ioaddr + CSR12));
}
tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0);
@@ -415,16 +414,17 @@ int tulip_check_duplex(struct net_device *dev)
bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
lpa = tulip_mdio_read(dev, tp->phys[0], MII_LPA);
if (tulip_debug > 1)
- printk(KERN_INFO "%s: MII status %4.4x, Link partner report "
- "%4.4x.\n", dev->name, bmsr, lpa);
+ dev_info(&dev->dev, "MII status %04x, Link partner report %04x\n",
+ bmsr, lpa);
if (bmsr == 0xffff)
return -2;
if ((bmsr & BMSR_LSTATUS) == 0) {
int new_bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
if ((new_bmsr & BMSR_LSTATUS) == 0) {
if (tulip_debug > 1)
- printk(KERN_INFO "%s: No link beat on the MII interface,"
- " status %4.4x.\n", dev->name, new_bmsr);
+ dev_info(&dev->dev,
+ "No link beat on the MII interface, status %04x\n",
+ new_bmsr);
return -1;
}
}
@@ -443,10 +443,10 @@ int tulip_check_duplex(struct net_device *dev)
tulip_restart_rxtx(tp);
if (tulip_debug > 0)
- printk(KERN_INFO "%s: Setting %s-duplex based on MII"
- "#%d link partner capability of %4.4x.\n",
- dev->name, tp->full_duplex ? "full" : "half",
- tp->phys[0], lpa);
+ dev_info(&dev->dev,
+ "Setting %s-duplex based on MII#%d link partner capability of %04x\n",
+ tp->full_duplex ? "full" : "half",
+ tp->phys[0], lpa);
return 1;
}
@@ -501,15 +501,13 @@ void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
tp->phys[phy_idx++] = phy;
- printk (KERN_INFO "tulip%d: MII transceiver #%d "
- "config %4.4x status %4.4x advertising %4.4x.\n",
+ pr_info("tulip%d: MII transceiver #%d config %04x status %04x advertising %04x\n",
board_idx, phy, mii_reg0, mii_status, mii_advert);
/* Fixup for DLink with miswired PHY. */
if (mii_advert != to_advert) {
- printk (KERN_DEBUG "tulip%d: Advertising %4.4x on PHY %d,"
- " previously advertising %4.4x.\n",
- board_idx, to_advert, phy, mii_advert);
+ printk(KERN_DEBUG "tulip%d: Advertising %04x on PHY %d, previously advertising %04x\n",
+ board_idx, to_advert, phy, mii_advert);
tulip_mdio_write (dev, phy, 4, to_advert);
}
@@ -554,7 +552,7 @@ void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
}
tp->mii_cnt = phy_idx;
if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) {
- printk (KERN_INFO "tulip%d: ***WARNING***: No MII transceiver found!\n",
+ pr_info("tulip%d: ***WARNING***: No MII transceiver found!\n",
board_idx);
tp->phys[0] = 1;
}
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c
index d3253ed09dfc..966efa1a27d7 100644
--- a/drivers/net/tulip/pnic.c
+++ b/drivers/net/tulip/pnic.c
@@ -40,8 +40,8 @@ void pnic_do_nway(struct net_device *dev)
new_csr6 |= 0x00000200;
}
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC autonegotiated status %8.8x, %s.\n",
- dev->name, phy_reg, medianame[dev->if_port]);
+ printk(KERN_DEBUG "%s: PNIC autonegotiated status %08x, %s\n",
+ dev->name, phy_reg, medianame[dev->if_port]);
if (tp->csr6 != new_csr6) {
tp->csr6 = new_csr6;
/* Restart Tx */
@@ -58,8 +58,8 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
int phy_reg = ioread32(ioaddr + 0xB8);
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC link changed state %8.8x, CSR5 %8.8x.\n",
- dev->name, phy_reg, csr5);
+ printk(KERN_DEBUG "%s: PNIC link changed state %08x, CSR5 %08x\n",
+ dev->name, phy_reg, csr5);
if (ioread32(ioaddr + CSR5) & TPLnkFail) {
iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkFail) | TPLnkPass, ioaddr + CSR7);
/* If we use an external MII, then we mustn't use the
@@ -114,9 +114,8 @@ void pnic_timer(unsigned long data)
int csr5 = ioread32(ioaddr + CSR5);
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC timer PHY status %8.8x, %s "
- "CSR5 %8.8x.\n",
- dev->name, phy_reg, medianame[dev->if_port], csr5);
+ printk(KERN_DEBUG "%s: PNIC timer PHY status %08x, %s CSR5 %08x\n",
+ dev->name, phy_reg, medianame[dev->if_port], csr5);
if (phy_reg & 0x04000000) { /* Remote link fault */
iowrite32(0x0201F078, ioaddr + 0xB8);
next_tick = 1*HZ;
@@ -126,10 +125,11 @@ void pnic_timer(unsigned long data)
next_tick = 60*HZ;
} else if (csr5 & TPLnkFail) { /* 100baseTx link beat */
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %4.4x, "
- "CSR5 %8.8x, PHY %3.3x.\n",
- dev->name, medianame[dev->if_port], csr12,
- ioread32(ioaddr + CSR5), ioread32(ioaddr + 0xB8));
+ printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %04x, CSR5 %08x, PHY %03x\n",
+ dev->name, medianame[dev->if_port],
+ csr12,
+ ioread32(ioaddr + CSR5),
+ ioread32(ioaddr + 0xB8));
next_tick = 3*HZ;
if (tp->medialock) {
} else if (tp->nwayset && (dev->if_port & 1)) {
@@ -151,10 +151,11 @@ void pnic_timer(unsigned long data)
tulip_restart_rxtx(tp);
dev->trans_start = jiffies;
if (tulip_debug > 1)
- printk(KERN_INFO "%s: Changing PNIC configuration to %s "
- "%s-duplex, CSR6 %8.8x.\n",
- dev->name, medianame[dev->if_port],
- tp->full_duplex ? "full" : "half", new_csr6);
+ dev_info(&dev->dev,
+ "Changing PNIC configuration to %s %s-duplex, CSR6 %08x\n",
+ medianame[dev->if_port],
+ tp->full_duplex ? "full" : "half",
+ new_csr6);
}
}
}
@@ -162,7 +163,7 @@ too_good_connection:
mod_timer(&tp->timer, RUN_AT(next_tick));
if(!ioread32(ioaddr + CSR7)) {
if (tulip_debug > 1)
- printk(KERN_INFO "%s: sw timer wakeup.\n", dev->name);
+ dev_info(&dev->dev, "sw timer wakeup\n");
disable_irq(dev->irq);
tulip_refill_rx(dev);
enable_irq(dev->irq);
diff --git a/drivers/net/tulip/pnic2.c b/drivers/net/tulip/pnic2.c
index d8418694bf46..b8197666021e 100644
--- a/drivers/net/tulip/pnic2.c
+++ b/drivers/net/tulip/pnic2.c
@@ -87,8 +87,8 @@ void pnic2_timer(unsigned long data)
int next_tick = 60*HZ;
if (tulip_debug > 3)
- printk(KERN_INFO"%s: PNIC2 negotiation status %8.8x.\n",
- dev->name,ioread32(ioaddr + CSR12));
+ dev_info(&dev->dev, "PNIC2 negotiation status %08x\n",
+ ioread32(ioaddr + CSR12));
if (next_tick) {
mod_timer(&tp->timer, RUN_AT(next_tick));
@@ -125,8 +125,8 @@ void pnic2_start_nway(struct net_device *dev)
csr14 |= 0x00001184;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Restarting PNIC2 autonegotiation, "
- "csr14=%8.8x.\n", dev->name, csr14);
+ printk(KERN_DEBUG "%s: Restarting PNIC2 autonegotiation, csr14=%08x\n",
+ dev->name, csr14);
/* tell pnic2_lnk_change we are doing an nway negotiation */
dev->if_port = 0;
@@ -137,8 +137,8 @@ void pnic2_start_nway(struct net_device *dev)
tp->csr6 = ioread32(ioaddr + CSR6);
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: On Entry to Nway, "
- "csr6=%8.8x.\n", dev->name, tp->csr6);
+ printk(KERN_DEBUG "%s: On Entry to Nway, csr6=%08x\n",
+ dev->name, tp->csr6);
/* mask off any bits not to touch
* comment at top of file explains mask value
@@ -181,9 +181,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
int csr12 = ioread32(ioaddr + CSR12);
if (tulip_debug > 1)
- printk(KERN_INFO"%s: PNIC2 link status interrupt %8.8x, "
- " CSR5 %x, %8.8x.\n", dev->name, csr12,
- csr5, ioread32(ioaddr + CSR14));
+ dev_info(&dev->dev,
+ "PNIC2 link status interrupt %08x, CSR5 %x, %08x\n",
+ csr12, csr5, ioread32(ioaddr + CSR14));
/* If NWay finished and we have a negotiated partner capability.
* check bits 14:12 for bit pattern 101 - all is good
@@ -215,9 +215,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
else if (negotiated & 0x0020) dev->if_port = 0;
else {
if (tulip_debug > 1)
- printk(KERN_INFO "%s: funny autonegotiate result "
- "csr12 %8.8x advertising %4.4x\n",
- dev->name, csr12, tp->sym_advertise);
+ dev_info(&dev->dev,
+ "funny autonegotiate result csr12 %08x advertising %04x\n",
+ csr12, tp->sym_advertise);
tp->nwayset = 0;
/* so check if 100baseTx link state is okay */
if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180))
@@ -231,10 +231,11 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
if (tulip_debug > 1) {
if (tp->nwayset)
- printk(KERN_INFO "%s: Switching to %s based on link "
- "negotiation %4.4x & %4.4x = %4.4x.\n",
- dev->name, medianame[dev->if_port],
- tp->sym_advertise, tp->lpar, negotiated);
+ dev_info(&dev->dev,
+ "Switching to %s based on link negotiation %04x & %04x = %04x\n",
+ medianame[dev->if_port],
+ tp->sym_advertise, tp->lpar,
+ negotiated);
}
/* remember to turn off bit 7 - autonegotiate
@@ -270,9 +271,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
iowrite32(1, ioaddr + CSR13);
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 "
- "%8.8x.\n", dev->name, tp->csr6,
- ioread32(ioaddr + CSR6), ioread32(ioaddr + CSR12));
+ printk(KERN_DEBUG "%s: Setting CSR6 %08x/%x CSR12 %08x\n",
+ dev->name, tp->csr6,
+ ioread32(ioaddr + CSR6), ioread32(ioaddr + CSR12));
/* now the following actually writes out the
* new csr6 values
@@ -282,9 +283,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
return;
} else {
- printk(KERN_INFO "%s: Autonegotiation failed, "
- "using %s, link beat status %4.4x.\n",
- dev->name, medianame[dev->if_port], csr12);
+ dev_info(&dev->dev,
+ "Autonegotiation failed, using %s, link beat status %04x\n",
+ medianame[dev->if_port], csr12);
/* remember to turn off bit 7 - autonegotiate
* enable so we don't forget
@@ -339,9 +340,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
/* we are at 100mb and a potential link change occurred */
if (tulip_debug > 1)
- printk(KERN_INFO"%s: PNIC2 %s link beat %s.\n",
- dev->name, medianame[dev->if_port],
- (csr12 & 2) ? "failed" : "good");
+ dev_info(&dev->dev, "PNIC2 %s link beat %s\n",
+ medianame[dev->if_port],
+ (csr12 & 2) ? "failed" : "good");
/* check 100 link beat */
@@ -364,9 +365,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
/* we are at 10mb and a potential link change occurred */
if (tulip_debug > 1)
- printk(KERN_INFO"%s: PNIC2 %s link beat %s.\n",
- dev->name, medianame[dev->if_port],
- (csr12 & 4) ? "failed" : "good");
+ dev_info(&dev->dev, "PNIC2 %s link beat %s\n",
+ medianame[dev->if_port],
+ (csr12 & 4) ? "failed" : "good");
tp->nway = 0;
@@ -385,7 +386,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
if (tulip_debug > 1)
- printk(KERN_INFO"%s: PNIC2 Link Change Default?\n",dev->name);
+ dev_info(&dev->dev, "PNIC2 Link Change Default?\n");
/* if all else fails default to trying 10baseT-HD */
dev->if_port = 0;
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
index a0e084223082..36c2725ec886 100644
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/tulip/timer.c
@@ -28,11 +28,11 @@ void tulip_media_task(struct work_struct *work)
unsigned long flags;
if (tulip_debug > 2) {
- printk(KERN_DEBUG "%s: Media selection tick, %s, status %8.8x mode"
- " %8.8x SIA %8.8x %8.8x %8.8x %8.8x.\n",
- dev->name, medianame[dev->if_port], ioread32(ioaddr + CSR5),
- ioread32(ioaddr + CSR6), csr12, ioread32(ioaddr + CSR13),
- ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
+ printk(KERN_DEBUG "%s: Media selection tick, %s, status %08x mode %08x SIA %08x %08x %08x %08x\n",
+ dev->name, medianame[dev->if_port],
+ ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR6),
+ csr12, ioread32(ioaddr + CSR13),
+ ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
}
switch (tp->chip_id) {
case DC21140:
@@ -48,9 +48,9 @@ void tulip_media_task(struct work_struct *work)
Assume this a generic MII or SYM transceiver. */
next_tick = 60*HZ;
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: network media monitor CSR6 %8.8x "
- "CSR12 0x%2.2x.\n",
- dev->name, ioread32(ioaddr + CSR6), csr12 & 0xff);
+ printk(KERN_DEBUG "%s: network media monitor CSR6 %08x CSR12 0x%02x\n",
+ dev->name,
+ ioread32(ioaddr + CSR6), csr12 & 0xff);
break;
}
mleaf = &tp->mtable->mleaf[tp->cur_index];
@@ -62,9 +62,8 @@ void tulip_media_task(struct work_struct *work)
s8 bitnum = p[offset];
if (p[offset+1] & 0x80) {
if (tulip_debug > 1)
- printk(KERN_DEBUG"%s: Transceiver monitor tick "
- "CSR12=%#2.2x, no media sense.\n",
- dev->name, csr12);
+ printk(KERN_DEBUG "%s: Transceiver monitor tick CSR12=%#02x, no media sense\n",
+ dev->name, csr12);
if (mleaf->type == 4) {
if (mleaf->media == 3 && (csr12 & 0x02))
goto select_next_media;
@@ -72,16 +71,16 @@ void tulip_media_task(struct work_struct *work)
break;
}
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Transceiver monitor tick: CSR12=%#2.2x"
- " bit %d is %d, expecting %d.\n",
- dev->name, csr12, (bitnum >> 1) & 7,
- (csr12 & (1 << ((bitnum >> 1) & 7))) != 0,
- (bitnum >= 0));
+ printk(KERN_DEBUG "%s: Transceiver monitor tick: CSR12=%#02x bit %d is %d, expecting %d\n",
+ dev->name, csr12, (bitnum >> 1) & 7,
+ (csr12 & (1 << ((bitnum >> 1) & 7))) != 0,
+ (bitnum >= 0));
/* Check that the specified bit has the proper value. */
if ((bitnum < 0) !=
((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) {
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Link beat detected for %s.\n", dev->name,
+ printk(KERN_DEBUG "%s: Link beat detected for %s\n",
+ dev->name,
medianame[mleaf->media & MEDIA_MASK]);
if ((p[2] & 0x61) == 0x01) /* Bogus Znyx board. */
goto actually_mii;
@@ -100,9 +99,9 @@ void tulip_media_task(struct work_struct *work)
if (tulip_media_cap[dev->if_port] & MediaIsFD)
goto select_next_media; /* Skip FD entries. */
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: No link beat on media %s,"
- " trying transceiver type %s.\n",
- dev->name, medianame[mleaf->media & MEDIA_MASK],
+ printk(KERN_DEBUG "%s: No link beat on media %s, trying transceiver type %s\n",
+ dev->name,
+ medianame[mleaf->media & MEDIA_MASK],
medianame[tp->mtable->mleaf[tp->cur_index].media]);
tulip_select_media(dev, 0);
/* Restart the transmit process. */
@@ -151,8 +150,8 @@ void mxic_timer(unsigned long data)
int next_tick = 60*HZ;
if (tulip_debug > 3) {
- printk(KERN_INFO"%s: MXIC negotiation status %8.8x.\n", dev->name,
- ioread32(ioaddr + CSR12));
+ dev_info(&dev->dev, "MXIC negotiation status %08x\n",
+ ioread32(ioaddr + CSR12));
}
if (next_tick) {
mod_timer(&tp->timer, RUN_AT(next_tick));
@@ -167,11 +166,10 @@ void comet_timer(unsigned long data)
int next_tick = 60*HZ;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Comet link status %4.4x partner capability "
- "%4.4x.\n",
- dev->name,
- tulip_mdio_read(dev, tp->phys[0], 1),
- tulip_mdio_read(dev, tp->phys[0], 5));
+ printk(KERN_DEBUG "%s: Comet link status %04x partner capability %04x\n",
+ dev->name,
+ tulip_mdio_read(dev, tp->phys[0], 1),
+ tulip_mdio_read(dev, tp->phys[0], 5));
/* mod_timer synchronizes us with potential add_timer calls
* from interrupts.
*/
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 20696b5d60a5..cce2ada07950 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -41,7 +41,6 @@
static char version[] __devinitdata =
"Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
-
/* A few user-configurable values. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
@@ -211,7 +210,7 @@ struct tulip_chip_table tulip_tbl[] = {
};
-static struct pci_device_id tulip_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tulip_pci_tbl) = {
{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
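
DEFINE_PCI_DEVICE_TABLE(), used here for tulip_pci_tbl and for the dmfe/uli526x/winbond tables in the other files, expanded at the time to roughly "const struct pci_device_id name[] __devinitconst": the table becomes const and lands in init-time read-only data instead of an open-coded writable array. A short usage sketch, kernel build context assumed, with the first ID copied from the table above:

#include <linux/pci.h>
#include <linux/module.h>

static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 /* driver_data */ },
	{ 0, }						/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);
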
@@ -326,7 +325,8 @@ static void tulip_up(struct net_device *dev)
udelay(100);
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: tulip_up(), irq==%d.\n", dev->name, dev->irq);
+ printk(KERN_DEBUG "%s: tulip_up(), irq==%d\n",
+ dev->name, dev->irq);
iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
@@ -387,8 +387,9 @@ static void tulip_up(struct net_device *dev)
(dev->if_port == 12 ? 0 : dev->if_port);
for (i = 0; i < tp->mtable->leafcount; i++)
if (tp->mtable->mleaf[i].media == looking_for) {
- printk(KERN_INFO "%s: Using user-specified media %s.\n",
- dev->name, medianame[dev->if_port]);
+ dev_info(&dev->dev,
+ "Using user-specified media %s\n",
+ medianame[dev->if_port]);
goto media_picked;
}
}
@@ -396,8 +397,9 @@ static void tulip_up(struct net_device *dev)
int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
for (i = 0; i < tp->mtable->leafcount; i++)
if (tp->mtable->mleaf[i].media == looking_for) {
- printk(KERN_INFO "%s: Using EEPROM-set media %s.\n",
- dev->name, medianame[looking_for]);
+ dev_info(&dev->dev,
+ "Using EEPROM-set media %s\n",
+ medianame[looking_for]);
goto media_picked;
}
}
@@ -424,9 +426,10 @@ media_picked:
if (tp->mii_cnt) {
tulip_select_media(dev, 1);
if (tulip_debug > 1)
- printk(KERN_INFO "%s: Using MII transceiver %d, status "
- "%4.4x.\n",
- dev->name, tp->phys[0], tulip_mdio_read(dev, tp->phys[0], 1));
+ dev_info(&dev->dev,
+ "Using MII transceiver %d, status %04x\n",
+ tp->phys[0],
+ tulip_mdio_read(dev, tp->phys[0], 1));
iowrite32(csr6_mask_defstate, ioaddr + CSR6);
tp->csr6 = csr6_mask_hdcap;
dev->if_port = 11;
@@ -490,9 +493,10 @@ media_picked:
iowrite32(0, ioaddr + CSR2); /* Rx poll demand */
if (tulip_debug > 2) {
- printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
- dev->name, ioread32(ioaddr + CSR0), ioread32(ioaddr + CSR5),
- ioread32(ioaddr + CSR6));
+ printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
+ dev->name, ioread32(ioaddr + CSR0),
+ ioread32(ioaddr + CSR5),
+ ioread32(ioaddr + CSR6));
}
/* Set the timer to switch to check for link beat and perhaps switch
@@ -540,27 +544,30 @@ static void tulip_tx_timeout(struct net_device *dev)
if (tulip_media_cap[dev->if_port] & MediaIsMII) {
/* Do nothing -- the media monitor should handle this. */
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
- dev->name);
+ dev_warn(&dev->dev,
+ "Transmit timeout using MII device\n");
} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
tp->chip_id == DM910X) {
- printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, "
- "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
- dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
- ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
+ dev_warn(&dev->dev,
+ "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
+ ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
+ ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
+ ioread32(ioaddr + CSR15));
tp->timeout_recovery = 1;
schedule_work(&tp->media_work);
goto out_unlock;
} else if (tp->chip_id == PNIC2) {
- printk(KERN_WARNING "%s: PNIC2 transmit timed out, status %8.8x, "
- "CSR6/7 %8.8x / %8.8x CSR12 %8.8x, resetting...\n",
- dev->name, (int)ioread32(ioaddr + CSR5), (int)ioread32(ioaddr + CSR6),
- (int)ioread32(ioaddr + CSR7), (int)ioread32(ioaddr + CSR12));
+ dev_warn(&dev->dev,
+ "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
+ (int)ioread32(ioaddr + CSR5),
+ (int)ioread32(ioaddr + CSR6),
+ (int)ioread32(ioaddr + CSR7),
+ (int)ioread32(ioaddr + CSR12));
} else {
- printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, CSR12 "
- "%8.8x, resetting...\n",
- dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
+ dev_warn(&dev->dev,
+ "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
+ ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
dev->if_port = 0;
}
@@ -570,26 +577,26 @@ static void tulip_tx_timeout(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
int j;
- printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
- "%2.2x %2.2x %2.2x.\n",
- i, (unsigned int)tp->rx_ring[i].status,
- (unsigned int)tp->rx_ring[i].length,
- (unsigned int)tp->rx_ring[i].buffer1,
- (unsigned int)tp->rx_ring[i].buffer2,
- buf[0], buf[1], buf[2]);
+ printk(KERN_DEBUG
+ "%2d: %08x %08x %08x %08x %02x %02x %02x\n",
+ i,
+ (unsigned int)tp->rx_ring[i].status,
+ (unsigned int)tp->rx_ring[i].length,
+ (unsigned int)tp->rx_ring[i].buffer1,
+ (unsigned int)tp->rx_ring[i].buffer2,
+ buf[0], buf[1], buf[2]);
for (j = 0; buf[j] != 0xee && j < 1600; j++)
if (j < 100)
- printk(KERN_CONT " %2.2x", buf[j]);
- printk(KERN_CONT " j=%d.\n", j);
+ pr_cont(" %02x", buf[j]);
+ pr_cont(" j=%d\n", j);
}
- printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring);
+ printk(KERN_DEBUG " Rx ring %08x: ", (int)tp->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++)
- printk(KERN_CONT " %8.8x",
- (unsigned int)tp->rx_ring[i].status);
- printk(KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
+ pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
+ printk(KERN_DEBUG " Tx ring %08x: ", (int)tp->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(KERN_CONT " %8.8x", (unsigned int)tp->tx_ring[i].status);
- printk(KERN_CONT "\n");
+ pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
+ pr_cont("\n");
}
#endif
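
In the tx_timeout ring dump above, the continuation pieces move from bare printk()/KERN_CONT to pr_cont(), which is shorthand for printk(KERN_CONT ...) and appends to the line started by the preceding printk instead of opening a new one. A sketch of the pattern, kernel build context assumed:

#include <linux/kernel.h>

static void dump_ring_status(const u32 *status, int n)
{
	int i;

	printk(KERN_DEBUG " Rx ring:");		/* starts the line at debug level */
	for (i = 0; i < n; i++)
		pr_cont(" %08x", status[i]);	/* appends, no new line or level */
	pr_cont("\n");
}
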
@@ -832,8 +839,9 @@ static int tulip_close (struct net_device *dev)
tulip_down (dev);
if (tulip_debug > 1)
- printk (KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
- dev->name, ioread32 (ioaddr + CSR5));
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "Shutting down ethercard, status was %02x\n",
+ ioread32 (ioaddr + CSR5));
free_irq (dev->irq, dev);
@@ -989,7 +997,7 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
@@ -1018,7 +1026,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
eaddrs = (u16 *)mclist->dmi_addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
@@ -1049,7 +1057,8 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
csr6 |= AcceptAllMulticast | AcceptAllPhys;
- } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > 1000) ||
+ (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter well -- accept all multicasts. */
tp->csr6 |= AcceptAllMulticast;
csr6 |= AcceptAllMulticast;
@@ -1058,14 +1067,16 @@ static void set_rx_mode(struct net_device *dev)
/* Should verify correctness on big-endian/__powerpc__ */
struct dev_mc_list *mclist;
int i;
- if (dev->mc_count > 64) { /* Arbitrary non-effective limit. */
+ if (netdev_mc_count(dev) > 64) {
+ /* Arbitrary non-effective limit. */
tp->csr6 |= AcceptAllMulticast;
csr6 |= AcceptAllMulticast;
} else {
u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
int filterbit;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < netdev_mc_count(dev);
+ i++, mclist = mclist->next) {
if (tp->flags & COMET_MAC_ADDR)
filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
else
@@ -1073,10 +1084,10 @@ static void set_rx_mode(struct net_device *dev)
filterbit &= 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
if (tulip_debug > 2)
- printk(KERN_INFO "%s: Added filter for %pM"
- " %8.8x bit %d.\n",
- dev->name, mclist->dmi_addr,
- ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
+ dev_info(&dev->dev,
+ "Added filter for %pM %08x bit %d\n",
+ mclist->dmi_addr,
+ ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
}
if (mc_filter[0] == tp->mc_filter[0] &&
mc_filter[1] == tp->mc_filter[1])
@@ -1099,7 +1110,8 @@ static void set_rx_mode(struct net_device *dev)
/* Note that only the low-address shortword of setup_frame is valid!
The values are doubled for big-endian architectures. */
- if (dev->mc_count > 14) { /* Must use a multicast hash table. */
+ if (netdev_mc_count(dev) > 14) {
+ /* Must use a multicast hash table. */
build_setup_frame_hash(tp->setup_frame, dev);
tx_flags = 0x08400000 | 192;
} else {
@@ -1288,9 +1300,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
unsigned int force_csr0 = 0;
#ifndef MODULE
- static int did_version; /* Already printed version info. */
- if (tulip_debug > 0 && did_version++ == 0)
- printk (KERN_INFO "%s", version);
+ if (tulip_debug > 0)
+ printk_once(KERN_INFO "%s", version);
#endif
board_idx++;
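
The did_version counter can go away because printk_once() carries its own static one-shot guard. A sketch with stand-in globals (example_debug and example_version are hypothetical, mirroring the driver's tulip_debug and version), kernel build context assumed:

#include <linux/kernel.h>

static int example_debug = 1;			/* stand-in for tulip_debug */
static const char example_version[] = "Example driver v1.0\n";

static void print_banner(void)
{
	if (example_debug > 0)
		printk_once(KERN_INFO "%s", example_version);	/* emitted at most once */
}
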
@@ -1301,7 +1312,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
*/
if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
- printk (KERN_ERR PFX "skipping LMC card.\n");
+ pr_err(PFX "skipping LMC card\n");
return -ENODEV;
}
@@ -1317,15 +1328,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
pdev->revision < 0x30) {
- printk(KERN_INFO PFX
- "skipping early DM9100 with Crc bug (use dmfe)\n");
+ pr_info(PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
return -ENODEV;
}
dp = pci_device_to_OF_node(pdev);
if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
- printk(KERN_INFO PFX
- "skipping DM910x expansion card (use dmfe)\n");
+ pr_info(PFX "skipping DM910x expansion card (use dmfe)\n");
return -ENODEV;
}
}
@@ -1372,9 +1381,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
i = pci_enable_device(pdev);
if (i) {
- printk (KERN_ERR PFX
- "Cannot enable tulip board #%d, aborting\n",
- board_idx);
+ pr_err(PFX "Cannot enable tulip board #%d, aborting\n",
+ board_idx);
return i;
}
@@ -1383,22 +1391,22 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
/* alloc_etherdev ensures aligned and zeroed private structures */
dev = alloc_etherdev (sizeof (*tp));
if (!dev) {
- printk (KERN_ERR PFX "ether device alloc failed, aborting\n");
+ pr_err(PFX "ether device alloc failed, aborting\n");
return -ENOMEM;
}
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
- printk (KERN_ERR PFX "%s: I/O region (0x%llx@0x%llx) too small, "
- "aborting\n", pci_name(pdev),
- (unsigned long long)pci_resource_len (pdev, 0),
- (unsigned long long)pci_resource_start (pdev, 0));
+ pr_err(PFX "%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
+ pci_name(pdev),
+ (unsigned long long)pci_resource_len (pdev, 0),
+ (unsigned long long)pci_resource_start (pdev, 0));
goto err_out_free_netdev;
}
/* grab all resources from both PIO and MMIO regions, as we
* don't want anyone else messing around with our hardware */
- if (pci_request_regions (pdev, "tulip"))
+ if (pci_request_regions (pdev, DRV_NAME))
goto err_out_free_netdev;
ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
@@ -1611,8 +1619,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
if (dev->mem_start & MEDIA_MASK)
tp->default_port = dev->mem_start & MEDIA_MASK;
if (tp->default_port) {
- printk(KERN_INFO "tulip%d: Transceiver selection forced to %s.\n",
- board_idx, medianame[tp->default_port & MEDIA_MASK]);
+ pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
+ board_idx, medianame[tp->default_port & MEDIA_MASK]);
tp->medialock = 1;
if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
tp->full_duplex = 1;
@@ -1627,7 +1635,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
}
if (tp->flags & HAS_MEDIA_TABLE) {
- sprintf(dev->name, "tulip%d", board_idx); /* hack */
+ sprintf(dev->name, DRV_NAME "%d", board_idx); /* hack */
tulip_parse_eeprom(dev);
strcpy(dev->name, "eth%d"); /* un-hack */
}
@@ -1663,20 +1671,18 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
if (register_netdev(dev))
goto err_out_free_ring;
- printk(KERN_INFO "%s: %s rev %d at "
+ pci_set_drvdata(pdev, dev);
+
+ dev_info(&dev->dev,
#ifdef CONFIG_TULIP_MMIO
- "MMIO"
+ "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
#else
- "Port"
+ "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
#endif
- " %#llx,", dev->name, chip_name, pdev->revision,
- (unsigned long long) pci_resource_start(pdev, TULIP_BAR));
- pci_set_drvdata(pdev, dev);
-
- if (eeprom_missing)
- printk(" EEPROM not present,");
- printk(" %pM", dev->dev_addr);
- printk(", IRQ %d.\n", irq);
+ chip_name, pdev->revision,
+ (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
+ eeprom_missing ? " EEPROM not present," : "",
+ dev->dev_addr, irq);
if (tp->chip_id == PNIC2)
tp->link_change = pnic2_lnk_change;
@@ -1799,12 +1805,12 @@ static int tulip_resume(struct pci_dev *pdev)
return 0;
if ((retval = pci_enable_device(pdev))) {
- printk (KERN_ERR "tulip: pci_enable_device failed in resume\n");
+ pr_err(PFX "pci_enable_device failed in resume\n");
return retval;
}
if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
- printk (KERN_ERR "tulip: request_irq failed in resume\n");
+ pr_err(PFX "request_irq failed in resume\n");
return retval;
}
@@ -1874,7 +1880,7 @@ static struct pci_driver tulip_driver = {
static int __init tulip_init (void)
{
#ifdef MODULE
- printk (KERN_INFO "%s", version);
+ pr_info("%s", version);
#endif
/* copy module parms into globals */
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index fa019cabc355..216ceb322ed4 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -12,6 +12,8 @@
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "uli526x"
#define DRV_VERSION "0.9.3"
#define DRV_RELDATE "2005-7-29"
@@ -82,9 +84,16 @@
#define ULI526X_TX_TIMEOUT ((16*HZ)/2) /* tx packet time-out time 8 s" */
#define ULI526X_TX_KICK (4*HZ/2) /* tx packet Kick-out time 2 s" */
-#define ULI526X_DBUG(dbug_now, msg, value) if (uli526x_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value))
+#define ULI526X_DBUG(dbug_now, msg, value) \
+do { \
+ if (uli526x_debug || (dbug_now)) \
+ pr_err("%s %lx\n", (msg), (long) (value)); \
+} while (0)
-#define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMhz %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half");
+#define SHOW_MEDIA_TYPE(mode) \
+ pr_err("Change Speed to %sMhz %s duplex\n", \
+ mode & 1 ? "100" : "10", \
+ mode & 4 ? "full" : "half");
/* CR9 definition: SROM/MII */
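
ULI526X_DBUG above is rewritten as a do { ... } while (0) block so the macro behaves as a single statement and nests safely under an if/else without braces. A runnable sketch of why that matters, with printf standing in for pr_err and a hypothetical too_many flag:

#include <stdio.h>

static int uli526x_debug;	/* stand-in for the driver's debug module parameter */

/* do { ... } while (0) makes the macro one statement, so the else below
 * still binds to the intended if -- the point of the rewrite above. */
#define ULI526X_DBUG(dbug_now, msg, value)				\
do {									\
	if (uli526x_debug || (dbug_now))				\
		printf("uli526x: %s %lx\n", (msg), (long)(value));	\
} while (0)

int main(void)
{
	int too_many = 0;	/* hypothetical condition, for illustration only */

	if (too_many)
		ULI526X_DBUG(1, "Pass all multicast address", 64);
	else
		ULI526X_DBUG(1, "Set multicast address", 3);
	return 0;
}
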
@@ -284,7 +293,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n");
+ pr_warning("32-bit PCI DMA not available\n");
err = -ENODEV;
goto err_out_free;
}
@@ -295,19 +304,19 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
goto err_out_free;
if (!pci_resource_start(pdev, 0)) {
- printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
+ pr_err("I/O base is zero\n");
err = -ENODEV;
goto err_out_disable;
}
if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) {
- printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
+ pr_err("Allocated I/O size too small\n");
err = -ENODEV;
goto err_out_disable;
}
if (pci_request_regions(pdev, DRV_NAME)) {
- printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
+ pr_err("Failed to request PCI regions\n");
err = -ENODEV;
goto err_out_disable;
}
@@ -382,9 +391,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
if (err)
goto err_out_res;
- printk(KERN_INFO "%s: ULi M%04lx at pci%s, %pM, irq %d.\n",
- dev->name,ent->driver_data >> 16,pci_name(pdev),
- dev->dev_addr, dev->irq);
+ dev_info(&dev->dev, "ULi M%04lx at pci%s, %pM, irq %d\n",
+ ent->driver_data >> 16, pci_name(pdev),
+ dev->dev_addr, dev->irq);
pci_set_master(pdev);
@@ -516,7 +525,7 @@ static void uli526x_init(struct net_device *dev)
}
}
if(phy_tmp == 32)
- printk(KERN_WARNING "Can not find the phy address!!!");
+ pr_warning("Can not find the phy address!!!");
/* Parser SROM and media mode */
db->media_mode = uli526x_media_mode;
@@ -548,7 +557,7 @@ static void uli526x_init(struct net_device *dev)
update_cr6(db->cr6_data, ioaddr);
/* Send setup frame */
- send_filter_frame(dev, dev->mc_count); /* M5261/M5263 */
+ send_filter_frame(dev, netdev_mc_count(dev)); /* M5261/M5263 */
/* Init CR7, interrupt active bit */
db->cr7_data = CR7_DEFAULT;
@@ -582,7 +591,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
/* Too large packet check */
if (skb->len > MAX_PACKET_SIZE) {
- printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
+ pr_err("big packet = %d\n", (u16)skb->len);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -592,7 +601,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
/* No Tx resource check, it never happens normally */
if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
spin_unlock_irqrestore(&db->lock, flags);
- printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_packet_cnt);
+ pr_err("No Tx resource %ld\n", db->tx_packet_cnt);
return NETDEV_TX_BUSY;
}
@@ -897,16 +906,18 @@ static void uli526x_set_filter_mode(struct net_device * dev)
return;
}
- if (dev->flags & IFF_ALLMULTI || dev->mc_count > ULI5261_MAX_MULTICAST) {
- ULI526X_DBUG(0, "Pass all multicast address", dev->mc_count);
+ if (dev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(dev) > ULI5261_MAX_MULTICAST) {
+ ULI526X_DBUG(0, "Pass all multicast address",
+ netdev_mc_count(dev));
db->cr6_data &= ~(CR6_PM | CR6_PBF);
db->cr6_data |= CR6_PAM;
spin_unlock_irqrestore(&db->lock, flags);
return;
}
- ULI526X_DBUG(0, "Set multicast address", dev->mc_count);
- send_filter_frame(dev, dev->mc_count); /* M5261/M5263 */
+ ULI526X_DBUG(0, "Set multicast address", netdev_mc_count(dev));
+ send_filter_frame(dev, netdev_mc_count(dev)); /* M5261/M5263 */
spin_unlock_irqrestore(&db->lock, flags);
}
@@ -1058,7 +1069,7 @@ static void uli526x_timer(unsigned long data)
/* Link Failed */
ULI526X_DBUG(0, "Link Failed", tmp_cr12);
netif_carrier_off(dev);
- printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name);
+ pr_info("%s NIC Link is Down\n",dev->name);
db->link_failed = 1;
/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
@@ -1090,11 +1101,11 @@ static void uli526x_timer(unsigned long data)
}
if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
{
- printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed);
+ pr_info("%s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed);
}
else
{
- printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed);
+ pr_info("%s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed);
}
netif_carrier_on(dev);
}
@@ -1104,7 +1115,7 @@ static void uli526x_timer(unsigned long data)
{
if(db->init==1)
{
- printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name);
+ pr_info("%s NIC Link is Down\n",dev->name);
netif_carrier_off(dev);
}
}
@@ -1230,8 +1241,7 @@ static int uli526x_resume(struct pci_dev *pdev)
err = pci_set_power_state(pdev, PCI_D0);
if (err) {
- printk(KERN_WARNING "%s: Could not put device into D0\n",
- dev->name);
+ dev_warn(&dev->dev, "Could not put device into D0\n");
return err;
}
@@ -1432,7 +1442,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
update_cr6(db->cr6_data, dev->base_addr);
dev->trans_start = jiffies;
} else
- printk(KERN_ERR DRV_NAME ": No Tx resource - Send_filter_frame!\n");
+ pr_err("No Tx resource - Send_filter_frame!\n");
}
@@ -1783,7 +1793,7 @@ static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
}
-static struct pci_device_id uli526x_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(uli526x_pci_tbl) = {
{ 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID },
{ 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID },
{ 0, }
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 869a7a0005f9..98711a9f35ac 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -218,7 +218,7 @@ enum chip_capability_flags {
CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
};
-static const struct pci_device_id w840_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(w840_pci_tbl) = {
{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
@@ -376,8 +376,8 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
irq = pdev->irq;
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- printk(KERN_WARNING "Winbond-840: Device %s disabled due to DMA limitations.\n",
- pci_name(pdev));
+ pr_warning("Winbond-840: Device %s disabled due to DMA limitations\n",
+ pci_name(pdev));
return -EIO;
}
dev = alloc_etherdev(sizeof(*np));
@@ -422,8 +422,9 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
if (option & 0x200)
np->mii_if.full_duplex = 1;
if (option & 15)
- printk(KERN_INFO "%s: ignoring user supplied media type %d",
- dev->name, option & 15);
+ dev_info(&dev->dev,
+ "ignoring user supplied media type %d",
+ option & 15);
}
if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
np->mii_if.full_duplex = 1;
@@ -440,9 +441,8 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
if (i)
goto err_out_cleardev;
- printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
- dev->name, pci_id_tbl[chip_idx].name, ioaddr,
- dev->dev_addr, irq);
+ dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
+ pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);
if (np->drv_flags & CanHaveMII) {
int phy, phy_idx = 0;
@@ -453,16 +453,17 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
mdio_read(dev, phy, MII_PHYSID2);
- printk(KERN_INFO "%s: MII PHY %8.8xh found at address %d, status "
- "0x%4.4x advertising %4.4x.\n",
- dev->name, np->mii, phy, mii_status, np->mii_if.advertising);
+ dev_info(&dev->dev,
+ "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
+ np->mii, phy, mii_status,
+ np->mii_if.advertising);
}
}
np->mii_cnt = phy_idx;
np->mii_if.phy_id = np->phys[0];
if (phy_idx == 0) {
- printk(KERN_WARNING "%s: MII PHY not found -- this device may "
- "not operate correctly.\n", dev->name);
+ dev_warn(&dev->dev,
+ "MII PHY not found -- this device may not operate correctly\n");
}
}
@@ -644,8 +645,8 @@ static int netdev_open(struct net_device *dev)
goto out_err;
if (debug > 1)
- printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n",
- dev->name, dev->irq);
+ printk(KERN_DEBUG "%s: w89c840_open() irq %d\n",
+ dev->name, dev->irq);
if((i=alloc_ringdesc(dev)))
goto out_err;
@@ -657,7 +658,7 @@ static int netdev_open(struct net_device *dev)
netif_start_queue(dev);
if (debug > 2)
- printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
+ printk(KERN_DEBUG "%s: Done netdev_open()\n", dev->name);
/* Set the timer to check for link beat. */
init_timer(&np->timer);
@@ -688,16 +689,18 @@ static int update_link(struct net_device *dev)
if (!(mii_reg & 0x4)) {
if (netif_carrier_ok(dev)) {
if (debug)
- printk(KERN_INFO "%s: MII #%d reports no link. Disabling watchdog.\n",
- dev->name, np->phys[0]);
+ dev_info(&dev->dev,
+ "MII #%d reports no link. Disabling watchdog\n",
+ np->phys[0]);
netif_carrier_off(dev);
}
return np->csr6;
}
if (!netif_carrier_ok(dev)) {
if (debug)
- printk(KERN_INFO "%s: MII #%d link is back. Enabling watchdog.\n",
- dev->name, np->phys[0]);
+ dev_info(&dev->dev,
+ "MII #%d link is back. Enabling watchdog\n",
+ np->phys[0]);
netif_carrier_on(dev);
}
@@ -729,9 +732,10 @@ static int update_link(struct net_device *dev)
if (fasteth)
result |= 0x20000000;
if (result != np->csr6 && debug)
- printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
- dev->name, fasteth ? 100 : 10,
- duplex ? "full" : "half", np->phys[0]);
+ dev_info(&dev->dev,
+ "Setting %dMBit-%s-duplex based on MII#%d\n",
+ fasteth ? 100 : 10, duplex ? "full" : "half",
+ np->phys[0]);
return result;
}
@@ -763,8 +767,8 @@ static inline void update_csr6(struct net_device *dev, int new)
limit--;
if(!limit) {
- printk(KERN_INFO "%s: couldn't stop rxtx, IntrStatus %xh.\n",
- dev->name, csr5);
+ dev_info(&dev->dev,
+ "couldn't stop rxtx, IntrStatus %xh\n", csr5);
break;
}
udelay(1);
@@ -783,10 +787,9 @@ static void netdev_timer(unsigned long data)
void __iomem *ioaddr = np->base_addr;
if (debug > 2)
- printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
- "config %8.8x.\n",
- dev->name, ioread32(ioaddr + IntrStatus),
- ioread32(ioaddr + NetworkConfig));
+ printk(KERN_DEBUG "%s: Media selection timer tick, status %08x config %08x\n",
+ dev->name, ioread32(ioaddr + IntrStatus),
+ ioread32(ioaddr + NetworkConfig));
spin_lock_irq(&np->lock);
update_csr6(dev, update_link(dev));
spin_unlock_irq(&np->lock);
@@ -899,8 +902,8 @@ static void init_registers(struct net_device *dev)
/* When not a module we can work around broken '486 PCI boards. */
if (boot_cpu_data.x86 <= 4) {
i |= 0x4800;
- printk(KERN_INFO "%s: This is a 386/486 PCI system, setting cache "
- "alignment to 8 longwords.\n", dev->name);
+ dev_info(&dev->dev,
+ "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
} else {
i |= 0xE000;
}
@@ -931,22 +934,23 @@ static void tx_timeout(struct net_device *dev)
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base_addr;
- printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
- " resetting...\n", dev->name, ioread32(ioaddr + IntrStatus));
+ dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
+ ioread32(ioaddr + IntrStatus));
{
int i;
printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++)
- printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
- printk(KERN_DEBUG" Tx ring %p: ", np->tx_ring);
+ printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
+ printk(KERN_CONT "\n");
+ printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(" %8.8x", np->tx_ring[i].status);
- printk("\n");
+ printk(KERN_CONT " %08x", np->tx_ring[i].status);
+ printk(KERN_CONT "\n");
}
- printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n",
- np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
- printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",ioread32(ioaddr+0x4C));
+ printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
+ np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
+ printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));
disable_irq(dev->irq);
spin_lock_irq(&np->lock);
@@ -1055,8 +1059,8 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
if (debug > 4) {
- printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
- dev->name, np->cur_tx, entry);
+ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d\n",
+ dev->name, np->cur_tx, entry);
}
return NETDEV_TX_OK;
}
@@ -1073,8 +1077,8 @@ static void netdev_tx_done(struct net_device *dev)
if (tx_status & 0x8000) { /* There was an error, log it. */
#ifndef final_version
if (debug > 1)
- printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
- dev->name, tx_status);
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
+ dev->name, tx_status);
#endif
np->stats.tx_errors++;
if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
@@ -1086,8 +1090,8 @@ static void netdev_tx_done(struct net_device *dev)
} else {
#ifndef final_version
if (debug > 3)
- printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %8.8x.\n",
- dev->name, entry, tx_status);
+ printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %08x\n",
+ dev->name, entry, tx_status);
#endif
np->stats.tx_bytes += np->tx_skbuff[entry]->len;
np->stats.collisions += (tx_status >> 3) & 15;
@@ -1130,8 +1134,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);
if (debug > 4)
- printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
- dev->name, intr_status);
+ printk(KERN_DEBUG "%s: Interrupt, status %04x\n",
+ dev->name, intr_status);
if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
break;
@@ -1156,8 +1160,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
netdev_error(dev, intr_status);
if (--work_limit < 0) {
- printk(KERN_WARNING "%s: Too much work at interrupt, "
- "status=0x%4.4x.\n", dev->name, intr_status);
+ dev_warn(&dev->dev,
+ "Too much work at interrupt, status=0x%04x\n",
+ intr_status);
/* Set the timer to re-enable the other interrupts after
10*82usec ticks. */
spin_lock(&np->lock);
@@ -1171,8 +1176,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
} while (1);
if (debug > 3)
- printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
- dev->name, ioread32(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x\n",
+ dev->name, ioread32(ioaddr + IntrStatus));
return IRQ_RETVAL(handled);
}
@@ -1185,8 +1190,8 @@ static int netdev_rx(struct net_device *dev)
int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
if (debug > 4) {
- printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
- entry, np->rx_ring[entry].status);
+ printk(KERN_DEBUG " In netdev_rx(), entry %d status %04x\n",
+ entry, np->rx_ring[entry].status);
}
/* If EOP is set on the next entry, it's a new packet. Send it up. */
@@ -1195,24 +1200,24 @@ static int netdev_rx(struct net_device *dev)
s32 status = desc->status;
if (debug > 4)
- printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
- status);
+ printk(KERN_DEBUG " netdev_rx() status was %08x\n",
+ status);
if (status < 0)
break;
if ((status & 0x38008300) != 0x0300) {
if ((status & 0x38000300) != 0x0300) {
/* Ignore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
- printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
- "multiple buffers, entry %#x status %4.4x!\n",
- dev->name, np->cur_rx, status);
+ dev_warn(&dev->dev,
+ "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
+ np->cur_rx, status);
np->stats.rx_length_errors++;
}
} else if (status & 0x8000) {
/* There was a fatal error. */
if (debug > 2)
- printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
- dev->name, status);
+ printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
+ dev->name, status);
np->stats.rx_errors++; /* end of a packet.*/
if (status & 0x0890) np->stats.rx_length_errors++;
if (status & 0x004C) np->stats.rx_frame_errors++;
@@ -1225,8 +1230,8 @@ static int netdev_rx(struct net_device *dev)
#ifndef final_version
if (debug > 4)
- printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
- " status %x.\n", pkt_len, status);
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d status %x\n",
+ pkt_len, status);
#endif
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
@@ -1251,11 +1256,10 @@ static int netdev_rx(struct net_device *dev)
#ifndef final_version /* Remove after testing. */
/* You will want this info for the initial debug. */
if (debug > 5)
- printk(KERN_DEBUG " Rx data %pM %pM"
- " %2.2x%2.2x %d.%d.%d.%d.\n",
+ printk(KERN_DEBUG " Rx data %pM %pM %02x%02x %pI4\n",
&skb->data[0], &skb->data[6],
skb->data[12], skb->data[13],
- skb->data[14], skb->data[15], skb->data[16], skb->data[17]);
+ &skb->data[14]);
#endif
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
@@ -1293,8 +1297,8 @@ static void netdev_error(struct net_device *dev, int intr_status)
void __iomem *ioaddr = np->base_addr;
if (debug > 2)
- printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
- dev->name, intr_status);
+ printk(KERN_DEBUG "%s: Abnormal event, %08x\n",
+ dev->name, intr_status);
if (intr_status == 0xffffffff)
return;
spin_lock(&np->lock);
@@ -1314,8 +1318,8 @@ static void netdev_error(struct net_device *dev, int intr_status)
new = 127; /* load full packet before starting */
new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
#endif
- printk(KERN_DEBUG "%s: Tx underflow, new csr6 %8.8x.\n",
- dev->name, new);
+ printk(KERN_DEBUG "%s: Tx underflow, new csr6 %08x\n",
+ dev->name, new);
update_csr6(dev, new);
}
if (intr_status & RxDied) { /* Missed a Rx frame. */
@@ -1357,7 +1361,7 @@ static u32 __set_rx_mode(struct net_device *dev)
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
| AcceptMyPhys;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
@@ -1366,8 +1370,9 @@ static u32 __set_rx_mode(struct net_device *dev)
struct dev_mc_list *mclist;
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < netdev_mc_count(dev);
+ i++, mclist = mclist->next) {
int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
filterbit &= 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
@@ -1487,11 +1492,13 @@ static int netdev_close(struct net_device *dev)
netif_stop_queue(dev);
if (debug > 1) {
- printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x "
- "Config %8.8x.\n", dev->name, ioread32(ioaddr + IntrStatus),
- ioread32(ioaddr + NetworkConfig));
- printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
- dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %08x Config %08x\n",
+ dev->name, ioread32(ioaddr + IntrStatus),
+ ioread32(ioaddr + NetworkConfig));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d\n",
+ dev->name,
+ np->cur_tx, np->dirty_tx,
+ np->cur_rx, np->dirty_rx);
}
/* Stop the chip's Tx and Rx processes. */
@@ -1512,18 +1519,16 @@ static int netdev_close(struct net_device *dev)
if (debug > 2) {
int i;
- printk(KERN_DEBUG" Tx ring at %8.8x:\n",
- (int)np->tx_ring);
+ printk(KERN_DEBUG" Tx ring at %08x:\n", (int)np->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x.\n",
- i, np->tx_ring[i].length,
- np->tx_ring[i].status, np->tx_ring[i].buffer1);
- printk(KERN_DEBUG " Rx ring %8.8x:\n",
- (int)np->rx_ring);
+ printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
+ i, np->tx_ring[i].length,
+ np->tx_ring[i].status, np->tx_ring[i].buffer1);
+ printk(KERN_DEBUG " Rx ring %08x:\n", (int)np->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++) {
- printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
- i, np->rx_ring[i].length,
- np->rx_ring[i].status, np->rx_ring[i].buffer1);
+ printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
+ i, np->rx_ring[i].length,
+ np->rx_ring[i].status, np->rx_ring[i].buffer1);
}
}
#endif /* __i386__ debugging only */
@@ -1622,9 +1627,8 @@ static int w840_resume (struct pci_dev *pdev)
goto out; /* device not suspended */
if (netif_running(dev)) {
if ((retval = pci_enable_device(pdev))) {
- printk (KERN_ERR
- "%s: pci_enable_device failed in resume\n",
- dev->name);
+ dev_err(&dev->dev,
+ "pci_enable_device failed in resume\n");
goto out;
}
spin_lock_irq(&np->lock);
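
The winbond-840 hunks above follow the common printk-to-dev_* conversion in this series: info/warn/error messages that used to interpolate dev->name move to dev_info()/dev_warn()/dev_err() on &dev->dev (which prefixes the device name automatically), while KERN_DEBUG messages guarded by the driver's debug knob are only reflowed. A minimal hedged sketch of the idiom; mydev_probe() is illustrative and not part of this patch:

	/* Sketch only: mydev_probe() is hypothetical, not from this diff. */
	#include <linux/device.h>
	#include <linux/netdevice.h>

	static int mydev_probe(struct net_device *dev, int irq)
	{
		/* old: printk(KERN_INFO "%s: found, IRQ %d.\n", dev->name, irq); */
		dev_info(&dev->dev, "found, IRQ %d\n", irq);

		if (irq < 0) {
			dev_warn(&dev->dev,
				 "no IRQ assigned -- device may not operate correctly\n");
			return -EINVAL;
		}
		return 0;
	}
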
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 9924c4c7e2d6..acfeeb980562 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -14,6 +14,8 @@
* $Id: xircom_cb.c,v 1.33 2001/03/19 14:02:07 arjanv Exp $
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -144,7 +146,7 @@ static int link_status(struct xircom_private *card);
-static struct pci_device_id xircom_pci_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = {
{0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,},
{0,},
};
@@ -234,7 +236,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
pci_write_config_word (pdev, PCI_STATUS,tmp16);
if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) {
- printk(KERN_ERR "xircom_probe: failed to allocate io-region\n");
+ pr_err("%s: failed to allocate io-region\n", __func__);
return -ENODEV;
}
@@ -245,7 +247,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
*/
dev = alloc_etherdev(sizeof(struct xircom_private));
if (!dev) {
- printk(KERN_ERR "xircom_probe: failed to allocate etherdev\n");
+ pr_err("%s: failed to allocate etherdev\n", __func__);
goto device_fail;
}
private = netdev_priv(dev);
@@ -253,12 +255,12 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
/* Allocate the send/receive buffers */
private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle);
if (private->rx_buffer == NULL) {
- printk(KERN_ERR "xircom_probe: no memory for rx buffer \n");
+ pr_err("%s: no memory for rx buffer\n", __func__);
goto rx_buf_fail;
}
private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle);
if (private->tx_buffer == NULL) {
- printk(KERN_ERR "xircom_probe: no memory for tx buffer \n");
+ pr_err("%s: no memory for tx buffer\n", __func__);
goto tx_buf_fail;
}
@@ -281,11 +283,12 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
pci_set_drvdata(pdev, dev);
if (register_netdev(dev)) {
- printk(KERN_ERR "xircom_probe: netdevice registration failed.\n");
+ pr_err("%s: netdevice registration failed\n", __func__);
goto reg_fail;
}
- printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, pdev->revision, pdev->irq);
+ dev_info(&dev->dev, "Xircom cardbus revision %i at irq %i\n",
+ pdev->revision, pdev->irq);
/* start the transmitter to get a heartbeat */
/* TODO: send 2 dummy packets here */
transceiver_voodoo(private);
@@ -347,8 +350,10 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
#ifdef DEBUG
print_binary(status);
- printk("tx status 0x%08x 0x%08x \n",card->tx_buffer[0],card->tx_buffer[4]);
- printk("rx status 0x%08x 0x%08x \n",card->rx_buffer[0],card->rx_buffer[4]);
+ printk("tx status 0x%08x 0x%08x \n",
+ card->tx_buffer[0], card->tx_buffer[4]);
+ printk("rx status 0x%08x 0x%08x \n",
+ card->rx_buffer[0], card->rx_buffer[4]);
#endif
/* Handle shared irq and hotplug */
if (status == 0 || status == 0xffffffff) {
@@ -358,9 +363,9 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
if (link_status_changed(card)) {
int newlink;
- printk(KERN_DEBUG "xircom_cb: Link status has changed \n");
+ printk(KERN_DEBUG "xircom_cb: Link status has changed\n");
newlink = link_status(card);
- printk(KERN_INFO "xircom_cb: Link is %i mbit \n",newlink);
+ dev_info(&dev->dev, "Link is %i mbit\n", newlink);
if (newlink)
netif_carrier_on(dev);
else
@@ -457,7 +462,8 @@ static int xircom_open(struct net_device *dev)
struct xircom_private *xp = netdev_priv(dev);
int retval;
enter("xircom_open");
- printk(KERN_INFO "xircom cardbus adaptor found, registering as %s, using irq %i \n",dev->name,dev->irq);
+ pr_info("xircom cardbus adaptor found, registering as %s, using irq %i \n",
+ dev->name, dev->irq);
retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
if (retval) {
leave("xircom_open - No IRQ");
@@ -770,7 +776,7 @@ static void activate_receiver(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Receiver failed to deactivate\n");
+ pr_err("Receiver failed to deactivate\n");
}
/* enable the receiver */
@@ -787,7 +793,7 @@ static void activate_receiver(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Receiver failed to re-activate\n");
+ pr_err("Receiver failed to re-activate\n");
}
leave("activate_receiver");
@@ -818,7 +824,7 @@ static void deactivate_receiver(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Receiver failed to deactivate\n");
+ pr_err("Receiver failed to deactivate\n");
}
@@ -861,7 +867,7 @@ static void activate_transmitter(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Transmitter failed to deactivate\n");
+ pr_err("Transmitter failed to deactivate\n");
}
/* enable the transmitter */
@@ -878,7 +884,7 @@ static void activate_transmitter(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Transmitter failed to re-activate\n");
+ pr_err("Transmitter failed to re-activate\n");
}
leave("activate_transmitter");
@@ -909,7 +915,7 @@ static void deactivate_transmitter(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Transmitter failed to deactivate\n");
+ pr_err("Transmitter failed to deactivate\n");
}
@@ -1184,7 +1190,7 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
struct sk_buff *skb;
if (pkt_len > 1518) {
- printk(KERN_ERR "xircom_cb: Packet length %i is bogus \n",pkt_len);
+ pr_err("Packet length %i is bogus\n", pkt_len);
pkt_len = 1518;
}
@@ -1222,7 +1228,7 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p
status = le32_to_cpu(card->tx_buffer[4*descnr]);
#if 0
if (status & 0x8000) { /* Major error */
- printk(KERN_ERR "Major transmit error status %x \n", status);
+ pr_err("Major transmit error status %x\n", status);
card->tx_buffer[4*descnr] = 0;
netif_wake_queue (dev);
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2834a01bae24..5adb3d150552 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -144,6 +144,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
err = 0;
tfile->tun = tun;
tun->tfile = tfile;
+ tun->socket.file = file;
dev_hold(tun->dev);
sock_hold(tun->socket.sk);
atomic_inc(&tfile->count);
@@ -158,6 +159,7 @@ static void __tun_detach(struct tun_struct *tun)
/* Detach from net device */
netif_tx_lock_bh(tun->dev);
tun->tfile = NULL;
+ tun->socket.file = NULL;
netif_tx_unlock_bh(tun->dev);
/* Drop read queue */
@@ -387,7 +389,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Notify and wake up reader process */
if (tun->flags & TUN_FASYNC)
kill_fasync(&tun->fasync, SIGIO, POLL_IN);
- wake_up_interruptible(&tun->socket.wait);
+ wake_up_interruptible_poll(&tun->socket.wait, POLLIN |
+ POLLRDNORM | POLLRDBAND);
return NETDEV_TX_OK;
drop:
@@ -743,7 +746,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
len = min_t(int, skb->len, len);
skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
- total += len;
+ total += skb->len;
tun->dev->stats.tx_packets++;
tun->dev->stats.tx_bytes += len;
@@ -751,34 +754,23 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
return total;
}
-static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
- unsigned long count, loff_t pos)
+static ssize_t tun_do_read(struct tun_struct *tun,
+ struct kiocb *iocb, const struct iovec *iv,
+ ssize_t len, int noblock)
{
- struct file *file = iocb->ki_filp;
- struct tun_file *tfile = file->private_data;
- struct tun_struct *tun = __tun_get(tfile);
DECLARE_WAITQUEUE(wait, current);
struct sk_buff *skb;
- ssize_t len, ret = 0;
-
- if (!tun)
- return -EBADFD;
+ ssize_t ret = 0;
DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
- len = iov_length(iv, count);
- if (len < 0) {
- ret = -EINVAL;
- goto out;
- }
-
add_wait_queue(&tun->socket.wait, &wait);
while (len) {
current->state = TASK_INTERRUPTIBLE;
/* Read frames from the queue */
if (!(skb=skb_dequeue(&tun->socket.sk->sk_receive_queue))) {
- if (file->f_flags & O_NONBLOCK) {
+ if (noblock) {
ret = -EAGAIN;
break;
}
@@ -805,6 +797,27 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
current->state = TASK_RUNNING;
remove_wait_queue(&tun->socket.wait, &wait);
+ return ret;
+}
+
+static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
+ unsigned long count, loff_t pos)
+{
+ struct file *file = iocb->ki_filp;
+ struct tun_file *tfile = file->private_data;
+ struct tun_struct *tun = __tun_get(tfile);
+ ssize_t len, ret;
+
+ if (!tun)
+ return -EBADFD;
+ len = iov_length(iv, count);
+ if (len < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
+ ret = min_t(ssize_t, ret, len);
out:
tun_put(tun);
return ret;
@@ -847,7 +860,8 @@ static void tun_sock_write_space(struct sock *sk)
return;
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_sync(sk->sk_sleep);
+ wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+ POLLWRNORM | POLLWRBAND);
tun = tun_sk(sk)->tun;
kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
@@ -858,6 +872,37 @@ static void tun_sock_destruct(struct sock *sk)
free_netdev(tun_sk(sk)->tun->dev);
}
+static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
+{
+ struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
+ return tun_get_user(tun, m->msg_iov, total_len,
+ m->msg_flags & MSG_DONTWAIT);
+}
+
+static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len,
+ int flags)
+{
+ struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
+ int ret;
+ if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
+ return -EINVAL;
+ ret = tun_do_read(tun, iocb, m->msg_iov, total_len,
+ flags & MSG_DONTWAIT);
+ if (ret > total_len) {
+ m->msg_flags |= MSG_TRUNC;
+ ret = flags & MSG_TRUNC ? ret : total_len;
+ }
+ return ret;
+}
+
+/* Ops structure to mimic raw sockets with tun */
+static const struct proto_ops tun_socket_ops = {
+ .sendmsg = tun_sendmsg,
+ .recvmsg = tun_recvmsg,
+};
+
static struct proto tun_proto = {
.name = "tun",
.owner = THIS_MODULE,
@@ -986,6 +1031,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
goto err_free_dev;
init_waitqueue_head(&tun->socket.wait);
+ tun->socket.ops = &tun_socket_ops;
sock_init_data(&tun->socket, sk);
sk->sk_write_space = tun_sock_write_space;
sk->sk_sndbuf = INT_MAX;
@@ -1525,6 +1571,23 @@ static void tun_cleanup(void)
rtnl_link_unregister(&tun_link_ops);
}
+/* Get an underlying socket object from tun file. Returns error unless file is
+ * attached to a device. The returned object works like a packet socket, it
+ * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
+ * holding a reference to the file for as long as the socket is in use. */
+struct socket *tun_get_socket(struct file *file)
+{
+ struct tun_struct *tun;
+ if (file->f_op != &tun_fops)
+ return ERR_PTR(-EINVAL);
+ tun = tun_get(file);
+ if (!tun)
+ return ERR_PTR(-EBADFD);
+ tun_put(tun);
+ return &tun->socket;
+}
+EXPORT_SYMBOL_GPL(tun_get_socket);
+
module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
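
The tun_get_socket() export added above is meant for in-kernel callers (vhost-net being the obvious consumer): it returns the tun device's socket, which behaves like a packet socket for sock_sendmsg()/sock_recvmsg(), and the caller must keep the tun file reference alive while the socket is in use. A hedged sketch of such a caller; send_one_frame() is a made-up helper, not part of this patch:

	/* Illustrative only: send_one_frame() is hypothetical. */
	#include <linux/if_tun.h>
	#include <linux/net.h>
	#include <linux/socket.h>
	#include <linux/uio.h>

	static int send_one_frame(struct file *tun_file, void *buf, size_t len)
	{
		struct socket *sock = tun_get_socket(tun_file);
		struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
		struct iovec iov = { .iov_base = buf, .iov_len = len };

		if (IS_ERR(sock))
			return PTR_ERR(sock);

		msg.msg_iov = &iov;	/* pre-iov_iter msghdr, as in this tree */
		msg.msg_iovlen = 1;
		/* the caller still holds tun_file, so the socket stays valid */
		return sock_sendmsg(sock, &msg, len);
	}
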
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 39f1fc650be6..edabc49a49bc 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -215,7 +215,7 @@ static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
* bit 8 indicates if this is a (0) copper or (1) fiber card
* bits 12-16 indicate card type: (0) client and (1) server
*/
-static struct pci_device_id typhoon_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
@@ -924,17 +924,18 @@ typhoon_set_rx_mode(struct net_device *dev)
filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
if(dev->flags & IFF_PROMISC) {
filter |= TYPHOON_RX_FILTER_PROMISCOUS;
- } else if((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
filter |= TYPHOON_RX_FILTER_ALL_MCAST;
- } else if(dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *mclist;
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < netdev_mc_count(dev);
+ i++, mclist = mclist->next) {
int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
}
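
Most of the multicast hunks in this series are the same mechanical conversion: dev->mc_count becomes netdev_mc_count(dev), "mc_count == 0" tests become netdev_mc_empty(dev), and open-coded walks over dev->mc_list can use the netdev_for_each_mc_addr() iterator (as the dm9601 hunk further down does). A hedged sketch of a set_rx_mode handler written against these helpers; hw_hash_set() and MY_MCAST_LIMIT are made-up stand-ins for the device-specific parts:

	/* Sketch only: hardware access is stubbed out. */
	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <linux/crc32.h>

	#define MY_MCAST_LIMIT	32		/* hypothetical hardware limit */

	static void hw_hash_set(struct net_device *dev, const u32 *hash)
	{
		/* stand-in for the device-specific filter register writes */
	}

	static void mydev_set_rx_mode(struct net_device *dev)
	{
		struct dev_mc_list *mc;
		u32 hash[2] = { 0, 0 };

		if (dev->flags & IFF_PROMISC)
			return;				/* hardware accepts everything */

		if ((dev->flags & IFF_ALLMULTI) ||
		    netdev_mc_count(dev) > MY_MCAST_LIMIT) {
			hash[0] = hash[1] = 0xffffffff;	/* accept all multicast */
		} else if (!netdev_mc_empty(dev)) {
			netdev_for_each_mc_addr(mc, dev) {
				u32 bit = ether_crc(ETH_ALEN, mc->dmi_addr) >> 26;
				hash[bit >> 5] |= 1 << (bit & 0x1f);
			}
		}
		hw_hash_set(dev, hash);
	}
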
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index eb8fe7e16c6c..a05720289c7e 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -37,6 +37,7 @@
#include <asm/qe.h>
#include <asm/ucc.h>
#include <asm/ucc_fast.h>
+#include <asm/machdep.h>
#include "ucc_geth.h"
#include "fsl_pq_mdio.h"
@@ -1334,7 +1335,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
struct ucc_geth __iomem *ug_regs;
struct ucc_fast __iomem *uf_regs;
int ret_val;
- u32 upsmr, maccfg2, tbiBaseAddress;
+ u32 upsmr, maccfg2;
u16 value;
ugeth_vdbg("%s: IN", __func__);
@@ -1389,14 +1390,20 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
/* Note that this depends on proper setting in utbipar register. */
if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- tbiBaseAddress = in_be32(&ug_regs->utbipar);
- tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
- tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
- value = ugeth->phydev->bus->read(ugeth->phydev->bus,
- (u8) tbiBaseAddress, ENET_TBI_MII_CR);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ struct phy_device *tbiphy;
+
+ if (!ug_info->tbi_node)
+ ugeth_warn("TBI mode requires that the device "
+ "tree specify a tbi-handle\n");
+
+ tbiphy = of_phy_find_device(ug_info->tbi_node);
+ if (!tbiphy)
+ ugeth_warn("Could not get TBI device\n");
+
+ value = phy_read(tbiphy, ENET_TBI_MII_CR);
value &= ~0x1000; /* Turn off autonegotiation */
- ugeth->phydev->bus->write(ugeth->phydev->bus,
- (u8) tbiBaseAddress, ENET_TBI_MII_CR, value);
+ phy_write(tbiphy, ENET_TBI_MII_CR, value);
}
init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
@@ -2024,7 +2031,8 @@ static void ucc_geth_set_multi(struct net_device *dev)
dmi = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
+ for (i = 0; i < netdev_mc_count(dev);
+ i++, dmi = dmi->next) {
/* Only support group multicast for now.
*/
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index a516185cbc9f..f02551713b13 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -542,9 +542,9 @@ static void asix_set_multicast(struct net_device *net)
if (net->flags & IFF_PROMISC) {
rx_ctl |= AX_RX_CTL_PRO;
} else if (net->flags & IFF_ALLMULTI ||
- net->mc_count > AX_MAX_MCAST) {
+ netdev_mc_count(net) > AX_MAX_MCAST) {
rx_ctl |= AX_RX_CTL_AMALL;
- } else if (net->mc_count == 0) {
+ } else if (netdev_mc_empty(net)) {
/* just broadcast and directed */
} else {
/* We use the 20 byte dev->data
@@ -558,7 +558,7 @@ static void asix_set_multicast(struct net_device *net)
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
- for (i = 0; i < net->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(net); i++) {
crc_bits =
ether_crc(ETH_ALEN,
mc_list->dmi_addr) >> 26;
@@ -754,9 +754,9 @@ static void ax88172_set_multicast(struct net_device *net)
if (net->flags & IFF_PROMISC) {
rx_ctl |= 0x01;
} else if (net->flags & IFF_ALLMULTI ||
- net->mc_count > AX_MAX_MCAST) {
+ netdev_mc_count(net) > AX_MAX_MCAST) {
rx_ctl |= 0x02;
- } else if (net->mc_count == 0) {
+ } else if (netdev_mc_empty(net)) {
/* just broadcast and directed */
} else {
/* We use the 20 byte dev->data
@@ -770,7 +770,7 @@ static void ax88172_set_multicast(struct net_device *net)
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
- for (i = 0; i < net->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(net); i++) {
crc_bits =
ether_crc(ETH_ALEN,
mc_list->dmi_addr) >> 26;
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 22b87e64a810..5a13660ebd17 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -648,7 +648,9 @@ static void catc_set_multicast_list(struct net_device *netdev)
if (netdev->flags & IFF_ALLMULTI) {
memset(catc->multicast, 0xff, 64);
} else {
- for (i = 0, mc = netdev->mc_list; mc && i < netdev->mc_count; i++, mc = mc->next) {
+ for (i = 0, mc = netdev->mc_list;
+ mc && i < netdev_mc_count(netdev);
+ i++, mc = mc->next) {
u32 crc = ether_crc_le(6, mc->dmi_addr);
if (!catc->is_f5u011) {
catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
@@ -897,11 +899,9 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
f5u011_rxmode(catc, catc->rxmode);
}
dbg("Init done.");
- printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, ",
+ printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n",
netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate",
- usbdev->bus->bus_name, usbdev->devpath);
- for (i = 0; i < 5; i++) printk("%2.2x:", netdev->dev_addr[i]);
- printk("%2.2x.\n", netdev->dev_addr[i]);
+ usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr);
usb_set_intfdata(intf, catc);
SET_NETDEV_DEV(netdev, &intf->dev);
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 3d406f9b2f29..c820fec62041 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -381,13 +381,13 @@ static void dm9601_set_multicast(struct net_device *net)
if (net->flags & IFF_PROMISC) {
rx_ctl |= 0x02;
- } else if (net->flags & IFF_ALLMULTI || net->mc_count > DM_MAX_MCAST) {
+ } else if (net->flags & IFF_ALLMULTI ||
+ netdev_mc_count(net) > DM_MAX_MCAST) {
rx_ctl |= 0x04;
- } else if (net->mc_count) {
- struct dev_mc_list *mc_list = net->mc_list;
- int i;
+ } else if (!netdev_mc_empty(net)) {
+ struct dev_mc_list *mc_list;
- for (i = 0; i < net->mc_count; i++, mc_list = mc_list->next) {
+ netdev_for_each_mc_addr(mc_list, net) {
u32 crc = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
hashes[crc >> 3] |= 1 << (crc & 0x7);
}
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index 55cf7081de10..9ab5c1983a7d 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -139,7 +139,7 @@ static void int51x1_set_multicast(struct net_device *netdev)
/* do not expect to see traffic of other PLCs */
filter |= PACKET_TYPE_PROMISCUOUS;
devinfo(dev, "promiscuous mode enabled");
- } else if (netdev->mc_count ||
+ } else if (!netdev_mc_empty(netdev) ||
(netdev->flags & IFF_ALLMULTI)) {
filter |= PACKET_TYPE_ALL_MULTICAST;
devdbg(dev, "receive all multicast enabled");
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index f1d64ef67efa..52671ea043a7 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -881,7 +881,7 @@ static void kaweth_set_rx_mode(struct net_device *net)
if (net->flags & IFF_PROMISC) {
packet_filter_bitmap |= KAWETH_PACKET_FILTER_PROMISCUOUS;
}
- else if ((net->mc_count) || (net->flags & IFF_ALLMULTI)) {
+ else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) {
packet_filter_bitmap |= KAWETH_PACKET_FILTER_ALL_MULTICAST;
}
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 87374317f480..34665137f2c3 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -1,13 +1,27 @@
/*
- * MosChips MCS7830 based USB 2.0 Ethernet Devices
+ * MOSCHIP MCS7830 based USB 2.0 Ethernet Devices
*
* based on usbnet.c, asix.c and the vendor provided mcs7830 driver
*
+ * Copyright (C) 2010 Andreas Mohr <andi@lisas.de>
* Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>
* Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
* Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
* Copyright (c) 2002-2003 TiVo Inc.
*
+ * Definitions gathered from MOSCHIP, Data Sheet_7830DA.pdf (thanks!).
+ *
+ * TODO:
+ * - support HIF_REG_CONFIG_SLEEPMODE/HIF_REG_CONFIG_TXENABLE (via autopm?)
+ * - implement ethtool_ops get_pauseparam/set_pauseparam
+ * via HIF_REG_PAUSE_THRESHOLD (>= revision C only!)
+ * - implement get_eeprom/[set_eeprom]
+ * - switch PHY on/off on ifup/ifdown (perhaps in usbnet.c, via MII)
+ * - mcs7830_get_regs() handling is weird: for rev 2 we return 32 regs,
+ * can access only ~ 24, remaining user buffer is uninitialized garbage
+ * - anything else?
+ *
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -55,7 +69,7 @@
ADVERTISE_100HALF | ADVERTISE_10FULL | \
ADVERTISE_10HALF | ADVERTISE_CSMA)
-/* HIF_REG_XX coressponding index value */
+/* HIF_REG_XX corresponding index value */
enum {
HIF_REG_MULTICAST_HASH = 0x00,
HIF_REG_PACKET_GAP1 = 0x08,
@@ -69,6 +83,7 @@ enum {
HIF_REG_PHY_CMD2_PEND_FLAG_BIT = 0x80,
HIF_REG_PHY_CMD2_READY_FLAG_BIT = 0x40,
HIF_REG_CONFIG = 0x0e,
+ /* hmm, spec sez: "R/W", "Except bit 3" (likely TXENABLE). */
HIF_REG_CONFIG_CFG = 0x80,
HIF_REG_CONFIG_SPEED100 = 0x40,
HIF_REG_CONFIG_FULLDUPLEX_ENABLE = 0x20,
@@ -76,13 +91,24 @@ enum {
HIF_REG_CONFIG_TXENABLE = 0x08,
HIF_REG_CONFIG_SLEEPMODE = 0x04,
HIF_REG_CONFIG_ALLMULTICAST = 0x02,
- HIF_REG_CONFIG_PROMISCIOUS = 0x01,
+ HIF_REG_CONFIG_PROMISCUOUS = 0x01,
HIF_REG_ETHERNET_ADDR = 0x0f,
- HIF_REG_22 = 0x15,
+ HIF_REG_FRAME_DROP_COUNTER = 0x15, /* 0..ff; reset: 0 */
HIF_REG_PAUSE_THRESHOLD = 0x16,
HIF_REG_PAUSE_THRESHOLD_DEFAULT = 0,
};
+/* Trailing status byte in Ethernet Rx frame */
+enum {
+ MCS7830_RX_SHORT_FRAME = 0x01, /* < 64 bytes */
+ MCS7830_RX_LENGTH_ERROR = 0x02, /* framelen != Ethernet length field */
+ MCS7830_RX_ALIGNMENT_ERROR = 0x04, /* non-even number of nibbles */
+ MCS7830_RX_CRC_ERROR = 0x08,
+ MCS7830_RX_LARGE_FRAME = 0x10, /* > 1518 bytes */
+ MCS7830_RX_FRAME_CORRECT = 0x20, /* frame is correct */
+ /* [7:6] reserved */
+};
+
struct mcs7830_data {
u8 multi_filter[8];
u8 config;
@@ -109,7 +135,7 @@ static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data)
return ret;
}
-static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, void *data)
+static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *data)
{
struct usb_device *xdev = dev->udev;
int ret;
@@ -183,13 +209,43 @@ out:
usb_free_urb(urb);
}
-static int mcs7830_get_address(struct usbnet *dev)
+static int mcs7830_hif_get_mac_address(struct usbnet *dev, unsigned char *addr)
+{
+ int ret = mcs7830_get_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int mcs7830_hif_set_mac_address(struct usbnet *dev, unsigned char *addr)
+{
+ int ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr);
+
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
{
int ret;
- ret = mcs7830_get_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN,
- dev->net->dev_addr);
+ struct usbnet *dev = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ ret = mcs7830_hif_set_mac_address(dev, addr->sa_data);
+
if (ret < 0)
return ret;
+
+ /* it worked --> adopt it on netdev side */
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
return 0;
}
@@ -307,7 +363,7 @@ static int mcs7830_get_rev(struct usbnet *dev)
{
u8 dummy[2];
int ret;
- ret = mcs7830_get_reg(dev, HIF_REG_22, 2, dummy);
+ ret = mcs7830_get_reg(dev, HIF_REG_FRAME_DROP_COUNTER, 2, dummy);
if (ret > 0)
return 2; /* Rev C or later */
return 1; /* earlier revision */
@@ -331,33 +387,6 @@ static void mcs7830_rev_C_fixup(struct usbnet *dev)
}
}
-static int mcs7830_init_dev(struct usbnet *dev)
-{
- int ret;
- int retry;
-
- /* Read MAC address from EEPROM */
- ret = -EINVAL;
- for (retry = 0; retry < 5 && ret; retry++)
- ret = mcs7830_get_address(dev);
- if (ret) {
- dev_warn(&dev->udev->dev, "Cannot read MAC address\n");
- goto out;
- }
-
- /* Set up PHY */
- ret = mcs7830_set_autoneg(dev, 0);
- if (ret) {
- dev_info(&dev->udev->dev, "Cannot set autoneg\n");
- goto out;
- }
-
- mcs7830_rev_C_fixup(dev);
- ret = 0;
-out:
- return ret;
-}
-
static int mcs7830_mdio_read(struct net_device *netdev, int phy_id,
int location)
{
@@ -378,11 +407,33 @@ static int mcs7830_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
}
-/* credits go to asix_set_multicast */
-static void mcs7830_set_multicast(struct net_device *net)
+static inline struct mcs7830_data *mcs7830_get_data(struct usbnet *dev)
+{
+ return (struct mcs7830_data *)&dev->data;
+}
+
+static void mcs7830_hif_update_multicast_hash(struct usbnet *dev)
+{
+ struct mcs7830_data *data = mcs7830_get_data(dev);
+ mcs7830_set_reg_async(dev, HIF_REG_MULTICAST_HASH,
+ sizeof data->multi_filter,
+ data->multi_filter);
+}
+
+static void mcs7830_hif_update_config(struct usbnet *dev)
+{
+ /* implementation specific to data->config
+ (argument needs to be heap-based anyway - USB DMA!) */
+ struct mcs7830_data *data = mcs7830_get_data(dev);
+ mcs7830_set_reg_async(dev, HIF_REG_CONFIG, 1, &data->config);
+}
+
+static void mcs7830_data_set_multicast(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
- struct mcs7830_data *data = (struct mcs7830_data *)&dev->data;
+ struct mcs7830_data *data = mcs7830_get_data(dev);
+
+ memset(data->multi_filter, 0, sizeof data->multi_filter);
data->config = HIF_REG_CONFIG_TXENABLE;
@@ -390,11 +441,11 @@ static void mcs7830_set_multicast(struct net_device *net)
data->config |= HIF_REG_CONFIG_ALLMULTICAST;
if (net->flags & IFF_PROMISC) {
- data->config |= HIF_REG_CONFIG_PROMISCIOUS;
+ data->config |= HIF_REG_CONFIG_PROMISCUOUS;
} else if (net->flags & IFF_ALLMULTI ||
- net->mc_count > MCS7830_MAX_MCAST) {
+ netdev_mc_count(net) > MCS7830_MAX_MCAST) {
data->config |= HIF_REG_CONFIG_ALLMULTICAST;
- } else if (net->mc_count == 0) {
+ } else if (netdev_mc_empty(net)) {
/* just broadcast and directed */
} else {
/* We use the 20 byte dev->data
@@ -405,21 +456,51 @@ static void mcs7830_set_multicast(struct net_device *net)
u32 crc_bits;
int i;
- memset(data->multi_filter, 0, sizeof data->multi_filter);
-
/* Build the multicast hash filter. */
- for (i = 0; i < net->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(net); i++) {
crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7);
mc_list = mc_list->next;
}
+ }
+}
- mcs7830_set_reg_async(dev, HIF_REG_MULTICAST_HASH,
- sizeof data->multi_filter,
- data->multi_filter);
+static int mcs7830_apply_base_config(struct usbnet *dev)
+{
+ int ret;
+
+ /* re-configure known MAC (suspend case etc.) */
+ ret = mcs7830_hif_set_mac_address(dev, dev->net->dev_addr);
+ if (ret) {
+ dev_info(&dev->udev->dev, "Cannot set MAC address\n");
+ goto out;
}
- mcs7830_set_reg_async(dev, HIF_REG_CONFIG, 1, &data->config);
+ /* Set up PHY */
+ ret = mcs7830_set_autoneg(dev, 0);
+ if (ret) {
+ dev_info(&dev->udev->dev, "Cannot set autoneg\n");
+ goto out;
+ }
+
+ mcs7830_hif_update_multicast_hash(dev);
+ mcs7830_hif_update_config(dev);
+
+ mcs7830_rev_C_fixup(dev);
+ ret = 0;
+out:
+ return ret;
+}
+
+/* credits go to asix_set_multicast */
+static void mcs7830_set_multicast(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+
+ mcs7830_data_set_multicast(net);
+
+ mcs7830_hif_update_multicast_hash(dev);
+ mcs7830_hif_update_config(dev);
}
static int mcs7830_get_regs_len(struct net_device *net)
@@ -463,29 +544,6 @@ static const struct ethtool_ops mcs7830_ethtool_ops = {
.nway_reset = usbnet_nway_reset,
};
-static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
-{
- int ret;
- struct usbnet *dev = netdev_priv(netdev);
- struct sockaddr *addr = p;
-
- if (netif_running(netdev))
- return -EBUSY;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
-
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-
- ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN,
- netdev->dev_addr);
-
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
static const struct net_device_ops mcs7830_netdev_ops = {
.ndo_open = usbnet_open,
.ndo_stop = usbnet_stop,
@@ -495,21 +553,32 @@ static const struct net_device_ops mcs7830_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mcs7830_ioctl,
.ndo_set_multicast_list = mcs7830_set_multicast,
- .ndo_set_mac_address = mcs7830_set_mac_address,
+ .ndo_set_mac_address = mcs7830_set_mac_address,
};
static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev)
{
struct net_device *net = dev->net;
int ret;
+ int retry;
- ret = mcs7830_init_dev(dev);
+ /* Initial startup: Gather MAC address setting from EEPROM */
+ ret = -EINVAL;
+ for (retry = 0; retry < 5 && ret; retry++)
+ ret = mcs7830_hif_get_mac_address(dev, net->dev_addr);
+ if (ret) {
+ dev_warn(&dev->udev->dev, "Cannot read MAC address\n");
+ goto out;
+ }
+
+ mcs7830_data_set_multicast(net);
+
+ ret = mcs7830_apply_base_config(dev);
if (ret)
goto out;
net->ethtool_ops = &mcs7830_ethtool_ops;
net->netdev_ops = &mcs7830_netdev_ops;
- mcs7830_set_multicast(net);
/* reserve space for the status byte on rx */
dev->rx_urb_size = ETH_FRAME_LEN + 1;
@@ -526,7 +595,7 @@ out:
return ret;
}
-/* The chip always appends a status bytes that we need to strip */
+/* The chip always appends a status byte that we need to strip */
static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
u8 status;
@@ -539,9 +608,23 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb_trim(skb, skb->len - 1);
status = skb->data[skb->len];
- if (status != 0x20)
+ if (status != MCS7830_RX_FRAME_CORRECT) {
dev_dbg(&dev->udev->dev, "rx fixup status %x\n", status);
+ /* hmm, perhaps usbnet.c already sees a globally visible
+ frame error and increments rx_errors on its own? */
+ dev->net->stats.rx_errors++;
+
+ if (status & (MCS7830_RX_SHORT_FRAME
+ |MCS7830_RX_LENGTH_ERROR
+ |MCS7830_RX_LARGE_FRAME))
+ dev->net->stats.rx_length_errors++;
+ if (status & MCS7830_RX_ALIGNMENT_ERROR)
+ dev->net->stats.rx_frame_errors++;
+ if (status & MCS7830_RX_CRC_ERROR)
+ dev->net->stats.rx_crc_errors++;
+ }
+
return skb->len > 0;
}
@@ -580,6 +663,20 @@ static const struct usb_device_id products[] = {
};
MODULE_DEVICE_TABLE(usb, products);
+static int mcs7830_reset_resume (struct usb_interface *intf)
+{
+ /* YES, this function is successful enough that ethtool -d
+ does show same output pre-/post-suspend */
+
+ struct usbnet *dev = usb_get_intfdata(intf);
+
+ mcs7830_apply_base_config(dev);
+
+ usbnet_resume(intf);
+
+ return 0;
+}
+
static struct usb_driver mcs7830_driver = {
.name = driver_name,
.id_table = products,
@@ -587,6 +684,7 @@ static struct usb_driver mcs7830_driver = {
.disconnect = usbnet_disconnect,
.suspend = usbnet_suspend,
.resume = usbnet_resume,
+ .reset_resume = mcs7830_reset_resume,
};
static int __init mcs7830_init(void)
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index ed4a508ef262..44ae8f6d3135 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1232,7 +1232,7 @@ static void pegasus_set_multicast(struct net_device *net)
pegasus->eth_regs[EthCtrl2] |= RX_PROMISCUOUS;
if (netif_msg_link(pegasus))
pr_info("%s: Promiscuous mode enabled.\n", net->name);
- } else if (net->mc_count || (net->flags & IFF_ALLMULTI)) {
+ } else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) {
pegasus->eth_regs[EthCtrl0] |= RX_MULTICAST;
pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
if (netif_msg_link(pegasus))
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index fd19db0d2504..e85c89c6706d 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -313,20 +313,17 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
{
struct sockaddr *addr = p;
rtl8150_t *dev = netdev_priv(netdev);
- int i;
if (netif_running(netdev))
return -EBUSY;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- dbg("%s: Setting MAC address to ", netdev->name);
- for (i = 0; i < 5; i++)
- dbg("%02X:", netdev->dev_addr[i]);
- dbg("%02X\n", netdev->dev_addr[i]);
+ dbg("%s: Setting MAC address to %pM\n", netdev->name, netdev->dev_addr);
/* Set the IDR registers. */
set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
#ifdef EEPROM_WRITE
{
+ int i;
u8 cr;
/* Get the CR contents. */
get_registers(dev, CR, 1, &cr);
@@ -714,7 +711,7 @@ static void rtl8150_set_multicast(struct net_device *netdev)
if (netdev->flags & IFF_PROMISC) {
dev->rx_creg |= cpu_to_le16(0x0001);
dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name);
- } else if (netdev->mc_count ||
+ } else if (!netdev_mc_empty(netdev) ||
(netdev->flags & IFF_ALLMULTI)) {
dev->rx_creg &= cpu_to_le16(0xfffe);
dev->rx_creg |= cpu_to_le16(0x0002);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 0c3c738d7419..48555d0e374d 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -384,7 +384,7 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
devdbg(dev, "receive all multicast enabled");
pdata->mac_cr |= MAC_CR_MCPAS_;
pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_HPFILT_);
- } else if (dev->net->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev->net)) {
struct dev_mc_list *mc_list = dev->net->mc_list;
int count = 0;
@@ -406,7 +406,7 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
mc_list = mc_list->next;
}
- if (count != ((u32)dev->net->mc_count))
+ if (count != ((u32) netdev_mc_count(dev->net)))
devwarn(dev, "mc_count != dev->mc_count");
if (netif_msg_drv(dev))
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 611b80435955..85df7ac636b5 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -267,7 +267,7 @@ enum rhine_quirks {
/* Beware of PCI posted writes */
#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
-static const struct pci_device_id rhine_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
@@ -1697,7 +1697,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
rx_mode = 0x1C;
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
iowrite32(0xffffffff, ioaddr + MulticastFilter1);
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
@@ -1707,7 +1707,8 @@ static void rhine_set_rx_mode(struct net_device *dev)
struct dev_mc_list *mclist;
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 317aa34b21cf..cd4e866321f8 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -361,7 +361,7 @@ static struct velocity_info_tbl chip_info_table[] = {
* Describe the PCI device identifiers that we support in this
* device driver. Used for hotplug autoloading.
*/
-static const struct pci_device_id velocity_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
{ }
};
@@ -1132,7 +1132,7 @@ static void velocity_set_multi(struct net_device *dev)
writel(0xffffffff, &regs->MARCAM[0]);
writel(0xffffffff, &regs->MARCAM[4]);
rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
- } else if ((dev->mc_count > vptr->multicast_limit) ||
+ } else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
(dev->flags & IFF_ALLMULTI)) {
writel(0xffffffff, &regs->MARCAM[0]);
writel(0xffffffff, &regs->MARCAM[4]);
@@ -1141,7 +1141,9 @@ static void velocity_set_multi(struct net_device *dev)
int offset = MCAM_SIZE - vptr->multicast_limit;
mac_get_cam_mask(regs, vptr->mCAMmask);
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < netdev_mc_count(dev);
+ i++, mclist = mclist->next) {
mac_set_cam(regs, i + offset, mclist->dmi_addr);
vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
}
@@ -2698,10 +2700,8 @@ static void __devinit velocity_print_info(struct velocity_info *vptr)
struct net_device *dev = vptr->dev;
printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
- printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
- dev->name,
- dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
- dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ printk(KERN_INFO "%s: Ethernet Address: %pM\n",
+ dev->name, dev->dev_addr);
}
static u32 velocity_get_link(struct net_device *dev)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9ead30bd00c4..ce35b42cc2cb 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -56,10 +56,6 @@ struct virtnet_info
/* Host will merge rx buffers for big packets (shake it! shake it!) */
bool mergeable_rx_bufs;
- /* Receive & send queues. */
- struct sk_buff_head recv;
- struct sk_buff_head send;
-
/* Work struct for refilling if we run low on memory. */
struct delayed_work refill;
@@ -75,34 +71,44 @@ struct skb_vnet_hdr {
unsigned int num_sg;
};
+struct padded_vnet_hdr {
+ struct virtio_net_hdr hdr;
+ /*
+ * virtio_net_hdr should be in a separated sg buffer because of a
+ * QEMU bug, and data sg buffer shares same page with this header sg.
+ * This padding makes next sg 16 byte aligned after virtio_net_hdr.
+ */
+ char padding[6];
+};
+
static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
return (struct skb_vnet_hdr *)skb->cb;
}
-static void give_a_page(struct virtnet_info *vi, struct page *page)
-{
- page->private = (unsigned long)vi->pages;
- vi->pages = page;
-}
-
-static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
+/*
+ * private is used to chain pages for big packets, put the whole
+ * most recent used list in the beginning for reuse
+ */
+static void give_pages(struct virtnet_info *vi, struct page *page)
{
- unsigned int i;
+ struct page *end;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- give_a_page(vi, skb_shinfo(skb)->frags[i].page);
- skb_shinfo(skb)->nr_frags = 0;
- skb->data_len = 0;
+ /* Find end of list, sew whole thing into vi->pages. */
+ for (end = page; end->private; end = (struct page *)end->private);
+ end->private = (unsigned long)vi->pages;
+ vi->pages = page;
}
static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
struct page *p = vi->pages;
- if (p)
+ if (p) {
vi->pages = (struct page *)p->private;
- else
+ /* clear private here, it is used to chain pages */
+ p->private = 0;
+ } else
p = alloc_page(gfp_mask);
return p;
}
@@ -118,99 +124,142 @@ static void skb_xmit_done(struct virtqueue *svq)
netif_wake_queue(vi->dev);
}
-static void receive_skb(struct net_device *dev, struct sk_buff *skb,
- unsigned len)
+static void set_skb_frag(struct sk_buff *skb, struct page *page,
+ unsigned int offset, unsigned int *len)
{
- struct virtnet_info *vi = netdev_priv(dev);
- struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
- int err;
- int i;
+ int i = skb_shinfo(skb)->nr_frags;
+ skb_frag_t *f;
+
+ f = &skb_shinfo(skb)->frags[i];
+ f->size = min((unsigned)PAGE_SIZE - offset, *len);
+ f->page_offset = offset;
+ f->page = page;
+
+ skb->data_len += f->size;
+ skb->len += f->size;
+ skb_shinfo(skb)->nr_frags++;
+ *len -= f->size;
+}
- if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
- pr_debug("%s: short packet %i\n", dev->name, len);
- dev->stats.rx_length_errors++;
- goto drop;
- }
+static struct sk_buff *page_to_skb(struct virtnet_info *vi,
+ struct page *page, unsigned int len)
+{
+ struct sk_buff *skb;
+ struct skb_vnet_hdr *hdr;
+ unsigned int copy, hdr_len, offset;
+ char *p;
- if (vi->mergeable_rx_bufs) {
- unsigned int copy;
- char *p = page_address(skb_shinfo(skb)->frags[0].page);
+ p = page_address(page);
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
- len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
-
- memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
- p += sizeof(hdr->mhdr);
+ /* copy small packet so we can reuse these pages for small data */
+ skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
+ if (unlikely(!skb))
+ return NULL;
- copy = len;
- if (copy > skb_tailroom(skb))
- copy = skb_tailroom(skb);
+ hdr = skb_vnet_hdr(skb);
- memcpy(skb_put(skb, copy), p, copy);
+ if (vi->mergeable_rx_bufs) {
+ hdr_len = sizeof hdr->mhdr;
+ offset = hdr_len;
+ } else {
+ hdr_len = sizeof hdr->hdr;
+ offset = sizeof(struct padded_vnet_hdr);
+ }
- len -= copy;
+ memcpy(hdr, p, hdr_len);
- if (!len) {
- give_a_page(vi, skb_shinfo(skb)->frags[0].page);
- skb_shinfo(skb)->nr_frags--;
- } else {
- skb_shinfo(skb)->frags[0].page_offset +=
- sizeof(hdr->mhdr) + copy;
- skb_shinfo(skb)->frags[0].size = len;
- skb->data_len += len;
- skb->len += len;
- }
+ len -= hdr_len;
+ p += offset;
- while (--hdr->mhdr.num_buffers) {
- struct sk_buff *nskb;
+ copy = len;
+ if (copy > skb_tailroom(skb))
+ copy = skb_tailroom(skb);
+ memcpy(skb_put(skb, copy), p, copy);
- i = skb_shinfo(skb)->nr_frags;
- if (i >= MAX_SKB_FRAGS) {
- pr_debug("%s: packet too long %d\n", dev->name,
- len);
- dev->stats.rx_length_errors++;
- goto drop;
- }
+ len -= copy;
+ offset += copy;
- nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
- if (!nskb) {
- pr_debug("%s: rx error: %d buffers missing\n",
- dev->name, hdr->mhdr.num_buffers);
- dev->stats.rx_length_errors++;
- goto drop;
- }
+ while (len) {
+ set_skb_frag(skb, page, offset, &len);
+ page = (struct page *)page->private;
+ offset = 0;
+ }
- __skb_unlink(nskb, &vi->recv);
- vi->num--;
+ if (page)
+ give_pages(vi, page);
- skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
- skb_shinfo(nskb)->nr_frags = 0;
- kfree_skb(nskb);
+ return skb;
+}
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
+static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
+{
+ struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
+ struct page *page;
+ int num_buf, i, len;
+
+ num_buf = hdr->mhdr.num_buffers;
+ while (--num_buf) {
+ i = skb_shinfo(skb)->nr_frags;
+ if (i >= MAX_SKB_FRAGS) {
+ pr_debug("%s: packet too long\n", skb->dev->name);
+ skb->dev->stats.rx_length_errors++;
+ return -EINVAL;
+ }
- skb_shinfo(skb)->frags[i].size = len;
- skb_shinfo(skb)->nr_frags++;
- skb->data_len += len;
- skb->len += len;
+ page = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
+ if (!page) {
+ pr_debug("%s: rx error: %d buffers missing\n",
+ skb->dev->name, hdr->mhdr.num_buffers);
+ skb->dev->stats.rx_length_errors++;
+ return -EINVAL;
}
- } else {
- len -= sizeof(hdr->hdr);
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+
+ set_skb_frag(skb, page, 0, &len);
+
+ --vi->num;
+ }
+ return 0;
+}
+
+static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct page *page;
+ struct skb_vnet_hdr *hdr;
- if (len <= MAX_PACKET_LEN)
- trim_pages(vi, skb);
+ if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
+ pr_debug("%s: short packet %i\n", dev->name, len);
+ dev->stats.rx_length_errors++;
+ if (vi->mergeable_rx_bufs || vi->big_packets)
+ give_pages(vi, buf);
+ else
+ dev_kfree_skb(buf);
+ return;
+ }
- err = pskb_trim(skb, len);
- if (err) {
- pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
- len, err);
+ if (!vi->mergeable_rx_bufs && !vi->big_packets) {
+ skb = buf;
+ len -= sizeof(struct virtio_net_hdr);
+ skb_trim(skb, len);
+ } else {
+ page = buf;
+ skb = page_to_skb(vi, page, len);
+ if (unlikely(!skb)) {
dev->stats.rx_dropped++;
- goto drop;
+ give_pages(vi, page);
+ return;
}
+ if (vi->mergeable_rx_bufs)
+ if (receive_mergeable(vi, skb)) {
+ dev_kfree_skb(skb);
+ return;
+ }
}
+ hdr = skb_vnet_hdr(skb);
skb->truesize += skb->data_len;
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
@@ -267,110 +316,119 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
frame_err:
dev->stats.rx_frame_errors++;
-drop:
dev_kfree_skb(skb);
}
-static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
+static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
struct sk_buff *skb;
- struct scatterlist sg[2+MAX_SKB_FRAGS];
- int num, err, i;
- bool oom = false;
-
- sg_init_table(sg, 2+MAX_SKB_FRAGS);
- do {
- struct skb_vnet_hdr *hdr;
+ struct skb_vnet_hdr *hdr;
+ struct scatterlist sg[2];
+ int err;
- skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
- if (unlikely(!skb)) {
- oom = true;
- break;
- }
+ skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
+ if (unlikely(!skb))
+ return -ENOMEM;
- skb_put(skb, MAX_PACKET_LEN);
+ skb_put(skb, MAX_PACKET_LEN);
- hdr = skb_vnet_hdr(skb);
- sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
+ hdr = skb_vnet_hdr(skb);
+ sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
- if (vi->big_packets) {
- for (i = 0; i < MAX_SKB_FRAGS; i++) {
- skb_frag_t *f = &skb_shinfo(skb)->frags[i];
- f->page = get_a_page(vi, gfp);
- if (!f->page)
- break;
+ skb_to_sgvec(skb, sg + 1, 0, skb->len);
- f->page_offset = 0;
- f->size = PAGE_SIZE;
+ err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb);
+ if (err < 0)
+ dev_kfree_skb(skb);
- skb->data_len += PAGE_SIZE;
- skb->len += PAGE_SIZE;
+ return err;
+}
- skb_shinfo(skb)->nr_frags++;
- }
+static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
+{
+ struct scatterlist sg[MAX_SKB_FRAGS + 2];
+ struct page *first, *list = NULL;
+ char *p;
+ int i, err, offset;
+
+ /* page in sg[MAX_SKB_FRAGS + 1] is list tail */
+ for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
+ first = get_a_page(vi, gfp);
+ if (!first) {
+ if (list)
+ give_pages(vi, list);
+ return -ENOMEM;
}
+ sg_set_buf(&sg[i], page_address(first), PAGE_SIZE);
- num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
- skb_queue_head(&vi->recv, skb);
+ /* chain new page in list head to match sg */
+ first->private = (unsigned long)list;
+ list = first;
+ }
- err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
- if (err < 0) {
- skb_unlink(skb, &vi->recv);
- trim_pages(vi, skb);
- kfree_skb(skb);
- break;
- }
- vi->num++;
- } while (err >= num);
- if (unlikely(vi->num > vi->max))
- vi->max = vi->num;
- vi->rvq->vq_ops->kick(vi->rvq);
- return !oom;
+ first = get_a_page(vi, gfp);
+ if (!first) {
+ give_pages(vi, list);
+ return -ENOMEM;
+ }
+ p = page_address(first);
+
+ /* sg[0], sg[1] share the same page */
+ /* a separate sg[0] for virtio_net_hdr only, due to a QEMU bug */
+ sg_set_buf(&sg[0], p, sizeof(struct virtio_net_hdr));
+
+ /* sg[1] for data packet, from offset */
+ offset = sizeof(struct padded_vnet_hdr);
+ sg_set_buf(&sg[1], p + offset, PAGE_SIZE - offset);
+
+ /* chain first in list head */
+ first->private = (unsigned long)list;
+ err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2,
+ first);
+ if (err < 0)
+ give_pages(vi, first);
+
+ return err;
}
-/* Returns false if we couldn't fill entirely (OOM). */
-static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
+static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
- struct sk_buff *skb;
- struct scatterlist sg[1];
+ struct page *page;
+ struct scatterlist sg;
int err;
- bool oom = false;
- if (!vi->mergeable_rx_bufs)
- return try_fill_recv_maxbufs(vi, gfp);
-
- do {
- skb_frag_t *f;
+ page = get_a_page(vi, gfp);
+ if (!page)
+ return -ENOMEM;
- skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
- if (unlikely(!skb)) {
- oom = true;
- break;
- }
+ sg_init_one(&sg, page_address(page), PAGE_SIZE);
- f = &skb_shinfo(skb)->frags[0];
- f->page = get_a_page(vi, gfp);
- if (!f->page) {
- oom = true;
- kfree_skb(skb);
- break;
- }
+ err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page);
+ if (err < 0)
+ give_pages(vi, page);
- f->page_offset = 0;
- f->size = PAGE_SIZE;
+ return err;
+}
- skb_shinfo(skb)->nr_frags++;
+/* Returns false if we couldn't fill entirely (OOM). */
+static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
+{
+ int err;
+ bool oom = false;
- sg_init_one(sg, page_address(f->page), PAGE_SIZE);
- skb_queue_head(&vi->recv, skb);
+ do {
+ if (vi->mergeable_rx_bufs)
+ err = add_recvbuf_mergeable(vi, gfp);
+ else if (vi->big_packets)
+ err = add_recvbuf_big(vi, gfp);
+ else
+ err = add_recvbuf_small(vi, gfp);
- err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
if (err < 0) {
- skb_unlink(skb, &vi->recv);
- kfree_skb(skb);
+ oom = true;
break;
}
- vi->num++;
+ ++vi->num;
} while (err > 0);
if (unlikely(vi->num > vi->max))
vi->max = vi->num;
@@ -407,15 +465,14 @@ static void refill_work(struct work_struct *work)
static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
- struct sk_buff *skb = NULL;
+ void *buf;
unsigned int len, received = 0;
again:
while (received < budget &&
- (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
- __skb_unlink(skb, &vi->recv);
- receive_skb(vi->dev, skb, len);
- vi->num--;
+ (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
+ receive_buf(vi->dev, buf, len);
+ --vi->num;
received++;
}
@@ -445,7 +502,6 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
- __skb_unlink(skb, &vi->send);
vi->dev->stats.tx_bytes += skb->len;
vi->dev->stats.tx_packets++;
tot_sgs += skb_vnet_hdr(skb)->num_sg;
@@ -495,9 +551,9 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
/* Encode metadata header at front. */
if (vi->mergeable_rx_bufs)
- sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr));
+ sg_set_buf(sg, &hdr->mhdr, sizeof hdr->mhdr);
else
- sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
+ sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
@@ -528,15 +584,6 @@ again:
}
vi->svq->vq_ops->kick(vi->svq);
- /*
- * Put new one in send queue. You'd expect we'd need this before
- * xmit_skb calls add_buf(), since the callback can be triggered
- * immediately after that. But since the callback just triggers
- * another call back here, normal network xmit locking prevents the
- * race.
- */
- __skb_queue_head(&vi->send, skb);
-
/* Don't wait up for transmitted skbs to be freed. */
skb_orphan(skb);
nf_reset(skb);
@@ -674,6 +721,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
struct virtio_net_ctrl_mac *mac_data;
struct dev_addr_list *addr;
struct netdev_hw_addr *ha;
+ int uc_count;
+ int mc_count;
void *buf;
int i;
@@ -700,9 +749,12 @@ static void virtnet_set_rx_mode(struct net_device *dev)
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
allmulti ? "en" : "dis");
+ uc_count = netdev_uc_count(dev);
+ mc_count = netdev_mc_count(dev);
/* MAC filter - use one buffer for both lists */
- mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) +
- (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
+ buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
+ (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
+ mac_data = buf;
if (!buf) {
dev_warn(&dev->dev, "No memory for MAC address buffer\n");
return;
@@ -711,24 +763,24 @@ static void virtnet_set_rx_mode(struct net_device *dev)
sg_init_table(sg, 2);
/* Store the unicast list and count in the front of the buffer */
- mac_data->entries = dev->uc.count;
+ mac_data->entries = uc_count;
i = 0;
- list_for_each_entry(ha, &dev->uc.list, list)
+ netdev_for_each_uc_addr(ha, dev)
memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
sg_set_buf(&sg[0], mac_data,
- sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN));
+ sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
/* multicast list and count fill the end */
- mac_data = (void *)&mac_data->macs[dev->uc.count][0];
+ mac_data = (void *)&mac_data->macs[uc_count][0];
- mac_data->entries = dev->mc_count;
+ mac_data->entries = mc_count;
addr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, addr = addr->next)
+ for (i = 0; i < mc_count; i++, addr = addr->next)
memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);
sg_set_buf(&sg[1], mac_data,
- sizeof(mac_data->entries) + (dev->mc_count * ETH_ALEN));
+ sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_TABLE_SET,
@@ -915,10 +967,6 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->features |= NETIF_F_HW_VLAN_FILTER;
}
- /* Initialize our empty receive and send queues. */
- skb_queue_head_init(&vi->recv);
- skb_queue_head_init(&vi->send);
-
err = register_netdev(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
@@ -951,26 +999,42 @@ free:
return err;
}
+static void free_unused_bufs(struct virtnet_info *vi)
+{
+ void *buf;
+ while (1) {
+ buf = vi->svq->vq_ops->detach_unused_buf(vi->svq);
+ if (!buf)
+ break;
+ dev_kfree_skb(buf);
+ }
+ while (1) {
+ buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq);
+ if (!buf)
+ break;
+ if (vi->mergeable_rx_bufs || vi->big_packets)
+ give_pages(vi, buf);
+ else
+ dev_kfree_skb(buf);
+ --vi->num;
+ }
+ BUG_ON(vi->num != 0);
+}
+
static void __devexit virtnet_remove(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
- struct sk_buff *skb;
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
- /* Free our skbs in send and recv queues, if any. */
- while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
- kfree_skb(skb);
- vi->num--;
- }
- __skb_queue_purge(&vi->send);
-
- BUG_ON(vi->num != 0);
unregister_netdev(vi->dev);
cancel_delayed_work_sync(&vi->refill);
+ /* Free unused buffers in both send and recv, if any. */
+ free_unused_bufs(vi);
+
vdev->config->del_vqs(vi->vdev);
while (vi->pages)
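
The receive path rewritten above strings its page buffers together through page->private and then walks that chain in page_to_skb() and receive_mergeable(). A minimal stand-alone sketch of the same chaining idiom, using a toy struct instead of the kernel's struct page (names here are illustrative only):

#include <stdio.h>

struct toy_page {
	unsigned long private;	/* next page in the chain, as page->private is used above */
	int id;
};

/* push a page onto the head of the chain, as add_recvbuf_big() does */
static struct toy_page *chain_push(struct toy_page *list, struct toy_page *p)
{
	p->private = (unsigned long)list;
	return p;
}

int main(void)
{
	struct toy_page pages[3] = { { 0, 0 }, { 0, 1 }, { 0, 2 } };
	struct toy_page *list = NULL;
	struct toy_page *p;
	int i;

	for (i = 0; i < 3; i++)
		list = chain_push(list, &pages[i]);

	/* consume the chain front to back, the way page_to_skb() fills skb frags */
	for (p = list; p; p = (struct toy_page *)p->private)
		printf("frag from page %d\n", p->id);

	return 0;
}

Keeping the whole chain reachable from the head is also what lets the error paths above hand everything back in one call to give_pages().
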
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 9cc438282d77..ee1b397417f3 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -35,7 +35,7 @@ char vmxnet3_driver_name[] = "vmxnet3";
* PCI Device ID Table
* Last entry must be all 0s
*/
-static const struct pci_device_id vmxnet3_pciid_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
{0}
};
@@ -1668,7 +1668,7 @@ static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
u8 *buf = NULL;
- u32 sz = netdev->mc_count * ETH_ALEN;
+ u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
if (sz <= 0xffff) {
@@ -1678,7 +1678,7 @@ vmxnet3_copy_mc(struct net_device *netdev)
int i;
struct dev_mc_list *mc = netdev->mc_list;
- for (i = 0; i < netdev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(netdev); i++) {
BUG_ON(!mc);
memcpy(buf + i * ETH_ALEN, mc->dmi_addr,
ETH_ALEN);
@@ -1708,12 +1708,12 @@ vmxnet3_set_mc(struct net_device *netdev)
if (netdev->flags & IFF_ALLMULTI)
new_mode |= VMXNET3_RXM_ALL_MULTI;
else
- if (netdev->mc_count > 0) {
+ if (!netdev_mc_empty(netdev)) {
new_table = vmxnet3_copy_mc(netdev);
if (new_table) {
new_mode |= VMXNET3_RXM_MCAST;
rxConf->mfTableLen = cpu_to_le16(
- netdev->mc_count * ETH_ALEN);
+ netdev_mc_count(netdev) * ETH_ALEN);
rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
new_table));
} else {
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index b9685e82f7b6..c248b01218a1 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -54,7 +54,7 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O"
"Virtualized Server Adapter");
-static struct pci_device_id vxge_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
PCI_ANY_ID},
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
@@ -1178,11 +1178,11 @@ static void vxge_set_multicast(struct net_device *dev)
memset(&mac_info, 0, sizeof(struct macInfo));
/* Update individual M_CAST address list */
- if ((!vdev->all_multi_flg) && dev->mc_count) {
+ if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
list_head = &vdev->vpaths[0].mac_addr_list;
- if ((dev->mc_count +
+ if ((netdev_mc_count(dev) +
(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
vdev->vpaths[0].max_mac_addr_cnt)
goto _set_all_mcast;
@@ -1217,7 +1217,7 @@ static void vxge_set_multicast(struct net_device *dev)
}
/* Add new ones */
- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN);
@@ -4297,10 +4297,8 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
vdev->ndev->name, ll_config.device_hw_info.product_desc);
- vxge_debug_init(VXGE_TRACE,
- "%s: MAC ADDR: %02X:%02X:%02X:%02X:%02X:%02X",
- vdev->ndev->name, macaddr[0], macaddr[1], macaddr[2],
- macaddr[3], macaddr[4], macaddr[5]);
+ vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
+ vdev->ndev->name, macaddr);
vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 3f759daf3ca4..f88c07c13197 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -2050,7 +2050,7 @@ static int __init dscc4_setup(char *str)
__setup("dscc4.setup=", dscc4_setup);
#endif
-static struct pci_device_id dscc4_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(dscc4_pci_tbl) = {
{ PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0,}
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 9bc2e3649157..40d724a8e020 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -528,7 +528,7 @@ static int fst_debug_mask = { FST_DEBUG };
/*
* PCI ID lookup table
*/
-static struct pci_device_id fst_pci_dev_id[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(fst_pci_dev_id) = {
{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T2P},
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 4b6f27e7c820..b27850377121 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -77,7 +77,7 @@
static int LMC_PKT_BUF_SZ = 1542;
-static struct pci_device_id lmc_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(lmc_pci_tbl) = {
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
PCI_VENDOR_ID_LMC, PCI_ANY_ID },
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index aec4d3955420..f4f1c00d0d23 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -251,7 +251,7 @@ static char rcsid[] =
#undef PC300_DEBUG_RX
#undef PC300_DEBUG_OTHER
-static struct pci_device_id cpc_pci_dev_id[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(cpc_pci_dev_id) = {
/* PC300/RSV or PC300/X21, 2 chan */
{0x120e, 0x300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x300},
/* PC300/RSV or PC300/X21, 1 chan */
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index 60ece54bdd94..c7ab3becd261 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -481,7 +481,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
-static struct pci_device_id pc300_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(pc300_pci_tbl) = {
{ PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_1, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_2, PCI_ANY_ID,
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index f1340faaf022..e2cff64a446a 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -417,7 +417,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
-static struct pci_device_id pci200_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(pci200_pci_tbl) = {
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 },
{ 0, }
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index daee8a0624ee..541c700dceef 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -814,7 +814,7 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
return 0;
}
-static struct pci_device_id wanxl_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(wanxl_pci_tbl) = {
{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID,
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 96a615fe09de..6cead321bc15 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -301,24 +301,15 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
/* Extract MAC addresss */
ddi = (void *) skb->data;
BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address));
- d_printf(2, dev, "GET DEVICE INFO: mac addr "
- "%02x:%02x:%02x:%02x:%02x:%02x\n",
- ddi->mac_address[0], ddi->mac_address[1],
- ddi->mac_address[2], ddi->mac_address[3],
- ddi->mac_address[4], ddi->mac_address[5]);
+ d_printf(2, dev, "GET DEVICE INFO: mac addr %pM\n",
+ ddi->mac_address);
if (!memcmp(net_dev->perm_addr, ddi->mac_address,
sizeof(ddi->mac_address)))
goto ok;
dev_warn(dev, "warning: device reports a different MAC address "
"to that of boot mode's\n");
- dev_warn(dev, "device reports %02x:%02x:%02x:%02x:%02x:%02x\n",
- ddi->mac_address[0], ddi->mac_address[1],
- ddi->mac_address[2], ddi->mac_address[3],
- ddi->mac_address[4], ddi->mac_address[5]);
- dev_warn(dev, "boot mode reported %02x:%02x:%02x:%02x:%02x:%02x\n",
- net_dev->perm_addr[0], net_dev->perm_addr[1],
- net_dev->perm_addr[2], net_dev->perm_addr[3],
- net_dev->perm_addr[4], net_dev->perm_addr[5]);
+ dev_warn(dev, "device reports %pM\n", ddi->mac_address);
+ dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr);
if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac)))
dev_err(dev, "device reports an invalid MAC address, "
"not updating\n");
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 64cdfeb299ca..e803a7dc6502 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -1041,21 +1041,14 @@ int i2400m_read_mac_addr(struct i2400m *i2400m)
dev_err(dev, "BM: read mac addr failed: %d\n", result);
goto error_read_mac;
}
- d_printf(2, dev,
- "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n",
- ack_buf.ack_pl[0], ack_buf.ack_pl[1],
- ack_buf.ack_pl[2], ack_buf.ack_pl[3],
- ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
+ d_printf(2, dev, "mac addr is %pM\n", ack_buf.ack_pl);
if (i2400m->bus_bm_mac_addr_impaired == 1) {
ack_buf.ack_pl[0] = 0x00;
ack_buf.ack_pl[1] = 0x16;
ack_buf.ack_pl[2] = 0xd3;
get_random_bytes(&ack_buf.ack_pl[3], 3);
dev_err(dev, "BM is MAC addr impaired, faking MAC addr to "
- "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n",
- ack_buf.ack_pl[0], ack_buf.ack_pl[1],
- ack_buf.ack_pl[2], ack_buf.ack_pl[3],
- ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
+ "mac addr is %pM\n", ack_buf.ack_pl);
result = 0;
}
net_dev->addr_len = ETH_ALEN;
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 39410016b4ff..547912e6843f 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -39,7 +39,7 @@ static unsigned int rx_ring_size __read_mostly = 16;
module_param(tx_ring_size, uint, 0);
module_param(rx_ring_size, uint, 0);
-static struct pci_device_id adm8211_pci_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(adm8211_pci_id_table) = {
/* ADMtek ADM8211 */
{ PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */
{ PCI_DEVICE(0x1200, 0x8201) }, /* ? */
@@ -302,18 +302,6 @@ static int adm8211_get_stats(struct ieee80211_hw *dev,
return 0;
}
-static int adm8211_get_tx_stats(struct ieee80211_hw *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct adm8211_priv *priv = dev->priv;
-
- stats[0].len = priv->cur_tx - priv->dirty_tx;
- stats[0].limit = priv->tx_ring_size - 2;
- stats[0].count = priv->dirty_tx;
-
- return 0;
-}
-
static void adm8211_interrupt_tci(struct ieee80211_hw *dev)
{
struct adm8211_priv *priv = dev->priv;
@@ -1400,15 +1388,15 @@ static void adm8211_configure_filter(struct ieee80211_hw *dev,
}
static int adm8211_add_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct adm8211_priv *priv = dev->priv;
if (priv->mode != NL80211_IFTYPE_MONITOR)
return -EOPNOTSUPP;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
- priv->mode = conf->type;
+ priv->mode = vif->type;
break;
default:
return -EOPNOTSUPP;
@@ -1416,8 +1404,8 @@ static int adm8211_add_interface(struct ieee80211_hw *dev,
ADM8211_IDLE();
- ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)conf->mac_addr));
- ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(conf->mac_addr + 4)));
+ ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)vif->addr));
+ ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(vif->addr + 4)));
adm8211_update_mode(dev);
@@ -1427,7 +1415,7 @@ static int adm8211_add_interface(struct ieee80211_hw *dev,
}
static void adm8211_remove_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct adm8211_priv *priv = dev->priv;
priv->mode = NL80211_IFTYPE_MONITOR;
@@ -1773,7 +1761,6 @@ static const struct ieee80211_ops adm8211_ops = {
.prepare_multicast = adm8211_prepare_multicast,
.configure_filter = adm8211_configure_filter,
.get_stats = adm8211_get_stats,
- .get_tx_stats = adm8211_get_tx_stats,
.get_tsf = adm8211_get_tsft
};
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 4331d675fcc6..c22a34c7639c 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -57,7 +57,7 @@
#define DRV_NAME "airo"
#ifdef CONFIG_PCI
-static struct pci_device_id card_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
{ 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, },
{ 0x14b9, 0x4500, PCI_ANY_ID, PCI_ANY_ID },
{ 0x14b9, 0x4800, PCI_ANY_ID, PCI_ANY_ID, },
@@ -2310,7 +2310,7 @@ static void airo_set_multicast_list(struct net_device *dev) {
airo_set_promisc(ai);
}
- if ((dev->flags&IFF_ALLMULTI)||dev->mc_count>0) {
+ if ((dev->flags&IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
/* Turn on multicast. (Should be already setup...) */
}
}
@@ -5254,11 +5254,7 @@ static int set_wep_key(struct airo_info *ai, u16 index, const char *key,
WepKeyRid wkr;
int rc;
- if (keylen == 0) {
- airo_print_err(ai->dev->name, "%s: key length to set was zero",
- __func__);
- return -1;
- }
+ WARN_ON(keylen == 0);
memset(&wkr, 0, sizeof(wkr));
wkr.len = cpu_to_le16(sizeof(wkr));
@@ -6405,11 +6401,7 @@ static int airo_set_encode(struct net_device *dev,
if (dwrq->length > MIN_KEY_SIZE)
key.len = MAX_KEY_SIZE;
else
- if (dwrq->length > 0)
- key.len = MIN_KEY_SIZE;
- else
- /* Disable the key */
- key.len = 0;
+ key.len = MIN_KEY_SIZE;
/* Check if the key is not marked as invalid */
if(!(dwrq->flags & IW_ENCODE_NOKEY)) {
/* Cleanup */
@@ -6590,12 +6582,22 @@ static int airo_set_encodeext(struct net_device *dev,
default:
return -EINVAL;
}
- /* Send the key to the card */
- rc = set_wep_key(local, idx, key.key, key.len, perm, 1);
- if (rc < 0) {
- airo_print_err(local->dev->name, "failed to set WEP key"
- " at index %d: %d.", idx, rc);
- return rc;
+ if (key.len == 0) {
+ rc = set_wep_tx_idx(local, idx, perm, 1);
+ if (rc < 0) {
+ airo_print_err(local->dev->name,
+ "failed to set WEP transmit index to %d: %d.",
+ idx, rc);
+ return rc;
+ }
+ } else {
+ rc = set_wep_key(local, idx, key.key, key.len, perm, 1);
+ if (rc < 0) {
+ airo_print_err(local->dev->name,
+ "failed to set WEP key at index %d: %d.",
+ idx, rc);
+ return rc;
+ }
}
}
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 2517364d3ebe..0fb419936dff 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1789,7 +1789,7 @@ static void at76_mac80211_stop(struct ieee80211_hw *hw)
}
static int at76_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct at76_priv *priv = hw->priv;
int ret = 0;
@@ -1798,7 +1798,7 @@ static int at76_add_interface(struct ieee80211_hw *hw,
mutex_lock(&priv->mtx);
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
priv->iw_mode = IW_MODE_INFRA;
break;
@@ -1814,7 +1814,7 @@ exit:
}
static void at76_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
at76_dbg(DBG_MAC80211, "%s()", __func__);
}
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index 9f9459860d82..8c8ce67971e9 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -109,7 +109,6 @@ struct ar9170_rxstream_mpdu_merge {
bool has_plcp;
};
-#define AR9170_NUM_MAX_BA_RETRY 5
#define AR9170_NUM_TID 16
#define WME_BA_BMP_SIZE 64
#define AR9170_NUM_MAX_AGG_LEN (2 * WME_BA_BMP_SIZE)
@@ -143,7 +142,12 @@ struct ar9170_sta_tid {
u16 tid;
enum ar9170_tid_state state;
bool active;
- u8 retry;
+};
+
+struct ar9170_tx_queue_stats {
+ unsigned int len;
+ unsigned int limit;
+ unsigned int count;
};
#define AR9170_QUEUE_TIMEOUT 64
@@ -154,6 +158,8 @@ struct ar9170_sta_tid {
#define AR9170_NUM_TX_STATUS 128
#define AR9170_NUM_TX_AGG_MAX 30
+#define AR9170_NUM_TX_LIMIT_HARD AR9170_TXQ_DEPTH
+#define AR9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH - 10)
struct ar9170 {
struct ieee80211_hw *hw;
@@ -211,7 +217,7 @@ struct ar9170 {
/* qos queue settings */
spinlock_t tx_stats_lock;
- struct ieee80211_tx_queue_stats tx_stats[5];
+ struct ar9170_tx_queue_stats tx_stats[5];
struct ieee80211_tx_queue_params edcf[5];
spinlock_t cmdlock;
@@ -248,13 +254,8 @@ struct ar9170_sta_info {
unsigned int ampdu_max_len;
};
-#define AR9170_TX_FLAG_WAIT_FOR_ACK BIT(0)
-#define AR9170_TX_FLAG_NO_ACK BIT(1)
-#define AR9170_TX_FLAG_BLOCK_ACK BIT(2)
-
struct ar9170_tx_info {
unsigned long timeout;
- unsigned int flags;
};
#define IS_STARTED(a) (((struct ar9170 *)a)->state >= AR9170_STARTED)
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 701ddb7d8400..0a1d4c28e68a 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -276,6 +276,7 @@ struct ar9170_tx_control {
#define AR9170_TX_MAC_RATE_PROBE 0x8000
/* either-or */
+#define AR9170_TX_PHY_MOD_MASK 0x00000003
#define AR9170_TX_PHY_MOD_CCK 0x00000000
#define AR9170_TX_PHY_MOD_OFDM 0x00000001
#define AR9170_TX_PHY_MOD_HT 0x00000002
diff --git a/drivers/net/wireless/ath/ar9170/mac.c b/drivers/net/wireless/ath/ar9170/mac.c
index ddc8c09dc79e..857e86104295 100644
--- a/drivers/net/wireless/ath/ar9170/mac.c
+++ b/drivers/net/wireless/ath/ar9170/mac.c
@@ -117,7 +117,7 @@ int ar9170_set_qos(struct ar9170 *ar)
ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP,
ar->edcf[0].txop | ar->edcf[1].txop << 16);
ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP,
- ar->edcf[1].txop | ar->edcf[3].txop << 16);
+ ar->edcf[2].txop | ar->edcf[3].txop << 16);
ar9170_regwrite_finish();
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index f9d6db8d013e..91797cb6e0e8 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -194,12 +194,15 @@ static inline u16 ar9170_get_seq(struct sk_buff *skb)
return ar9170_get_seq_h((void *) txc->frame_data);
}
+static inline u16 ar9170_get_tid_h(struct ieee80211_hdr *hdr)
+{
+ return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
+}
+
static inline u16 ar9170_get_tid(struct sk_buff *skb)
{
struct ar9170_tx_control *txc = (void *) skb->data;
- struct ieee80211_hdr *hdr = (void *) txc->frame_data;
-
- return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
+ return ar9170_get_tid_h((struct ieee80211_hdr *) txc->frame_data);
}
#define GET_NEXT_SEQ(seq) ((seq + 1) & 0x0fff)
@@ -213,10 +216,10 @@ static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
struct ieee80211_hdr *hdr = (void *) txc->frame_data;
- printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] flags:%x s:%d "
+ printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] s:%d "
"mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
- ieee80211_get_DA(hdr), arinfo->flags, ar9170_get_seq_h(hdr),
+ ieee80211_get_DA(hdr), ar9170_get_seq_h(hdr),
le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control),
jiffies_to_msecs(arinfo->timeout - jiffies));
}
@@ -430,7 +433,7 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
spin_lock_irqsave(&ar->tx_stats_lock, flags);
ar->tx_stats[queue].len--;
- if (skb_queue_empty(&ar->tx_pending[queue])) {
+ if (ar->tx_stats[queue].len < AR9170_NUM_TX_LIMIT_SOFT) {
#ifdef AR9170_QUEUE_STOP_DEBUG
printk(KERN_DEBUG "%s: wake queue %d\n",
wiphy_name(ar->hw->wiphy), queue);
@@ -440,22 +443,17 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
}
spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
- if (arinfo->flags & AR9170_TX_FLAG_BLOCK_ACK) {
- ar9170_tx_ampdu_callback(ar, skb);
- } else if (arinfo->flags & AR9170_TX_FLAG_WAIT_FOR_ACK) {
- arinfo->timeout = jiffies +
- msecs_to_jiffies(AR9170_TX_TIMEOUT);
-
- skb_queue_tail(&ar->tx_status[queue], skb);
- } else if (arinfo->flags & AR9170_TX_FLAG_NO_ACK) {
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
} else {
-#ifdef AR9170_QUEUE_DEBUG
- printk(KERN_DEBUG "%s: unsupported frame flags!\n",
- wiphy_name(ar->hw->wiphy));
- ar9170_print_txheader(ar, skb);
-#endif /* AR9170_QUEUE_DEBUG */
- dev_kfree_skb_any(skb);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ ar9170_tx_ampdu_callback(ar, skb);
+ } else {
+ arinfo->timeout = jiffies +
+ msecs_to_jiffies(AR9170_TX_TIMEOUT);
+
+ skb_queue_tail(&ar->tx_status[queue], skb);
+ }
}
if (!ar->tx_stats[queue].len &&
@@ -1407,17 +1405,6 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
(is_valid_ether_addr(ieee80211_get_DA(hdr)))) {
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- if (unlikely(!info->control.sta))
- goto err_out;
-
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
- arinfo->flags = AR9170_TX_FLAG_BLOCK_ACK;
-
- goto out;
- }
-
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
/*
* WARNING:
* Putting the QoS queue bits into an unexplored territory is
@@ -1431,12 +1418,17 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
txc->phy_control |=
cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
- arinfo->flags = AR9170_TX_FLAG_WAIT_FOR_ACK;
- } else {
- arinfo->flags = AR9170_TX_FLAG_NO_ACK;
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ if (unlikely(!info->control.sta))
+ goto err_out;
+
+ txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
+ } else {
+ txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
+ }
}
-out:
return 0;
err_out:
@@ -1671,8 +1663,7 @@ static bool ar9170_tx_ampdu(struct ar9170 *ar)
* tell the FW/HW that this is the last frame,
* that way it will wait for the immediate block ack.
*/
- if (likely(skb_peek_tail(&agg)))
- ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
+ ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
#ifdef AR9170_TXAGG_DEBUG
printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n",
@@ -1716,6 +1707,21 @@ static void ar9170_tx(struct ar9170 *ar)
for (i = 0; i < __AR9170_NUM_TXQ; i++) {
spin_lock_irqsave(&ar->tx_stats_lock, flags);
+ frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
+ skb_queue_len(&ar->tx_pending[i]));
+
+ if (remaining_space < frames) {
+#ifdef AR9170_QUEUE_DEBUG
+ printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
+ "remaining slots:%d, needed:%d\n",
+ wiphy_name(ar->hw->wiphy), i, remaining_space,
+ frames);
+#endif /* AR9170_QUEUE_DEBUG */
+ frames = remaining_space;
+ }
+
+ ar->tx_stats[i].len += frames;
+ ar->tx_stats[i].count += frames;
if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: queue %d full\n",
@@ -1733,25 +1739,8 @@ static void ar9170_tx(struct ar9170 *ar)
__ar9170_dump_txstats(ar);
#endif /* AR9170_QUEUE_STOP_DEBUG */
ieee80211_stop_queue(ar->hw, i);
- spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
- continue;
- }
-
- frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
- skb_queue_len(&ar->tx_pending[i]));
-
- if (remaining_space < frames) {
-#ifdef AR9170_QUEUE_DEBUG
- printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
- "remaining slots:%d, needed:%d\n",
- wiphy_name(ar->hw->wiphy), i, remaining_space,
- frames);
-#endif /* AR9170_QUEUE_DEBUG */
- frames = remaining_space;
}
- ar->tx_stats[i].len += frames;
- ar->tx_stats[i].count += frames;
spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
if (!frames)
@@ -1773,7 +1762,7 @@ static void ar9170_tx(struct ar9170 *ar)
arinfo->timeout = jiffies +
msecs_to_jiffies(AR9170_TX_TIMEOUT);
- if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
atomic_inc(&ar->tx_ampdu_pending);
#ifdef AR9170_QUEUE_DEBUG
@@ -1784,7 +1773,7 @@ static void ar9170_tx(struct ar9170 *ar)
err = ar->tx(ar, skb);
if (unlikely(err)) {
- if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
atomic_dec(&ar->tx_ampdu_pending);
frames_failed++;
@@ -1950,7 +1939,7 @@ err_free:
}
static int ar9170_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ar9170 *ar = hw->priv;
struct ath_common *common = &ar->common;
@@ -1963,8 +1952,8 @@ static int ar9170_op_add_interface(struct ieee80211_hw *hw,
goto unlock;
}
- ar->vif = conf->vif;
- memcpy(common->macaddr, conf->mac_addr, ETH_ALEN);
+ ar->vif = vif;
+ memcpy(common->macaddr, vif->addr, ETH_ALEN);
if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
ar->rx_software_decryption = true;
@@ -1984,7 +1973,7 @@ unlock:
}
static void ar9170_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ar9170 *ar = hw->priv;
@@ -2366,7 +2355,6 @@ static void ar9170_sta_notify(struct ieee80211_hw *hw,
sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN;
sta_info->agg[i].active = false;
sta_info->agg[i].ssn = 0;
- sta_info->agg[i].retry = 0;
sta_info->agg[i].tid = i;
INIT_LIST_HEAD(&sta_info->agg[i].list);
skb_queue_head_init(&sta_info->agg[i].queue);
@@ -2408,18 +2396,6 @@ static int ar9170_get_stats(struct ieee80211_hw *hw,
return 0;
}
-static int ar9170_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *tx_stats)
-{
- struct ar9170 *ar = hw->priv;
-
- spin_lock_bh(&ar->tx_stats_lock);
- memcpy(tx_stats, ar->tx_stats, sizeof(tx_stats[0]) * hw->queues);
- spin_unlock_bh(&ar->tx_stats_lock);
-
- return 0;
-}
-
static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *param)
{
@@ -2521,7 +2497,6 @@ static const struct ieee80211_ops ar9170_ops = {
.set_key = ar9170_set_key,
.sta_notify = ar9170_sta_notify,
.get_stats = ar9170_get_stats,
- .get_tx_stats = ar9170_get_tx_stats,
.ampdu_action = ar9170_ampdu_action,
};
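
The ar9170 queue accounting above stops a queue once tx_stats[queue].len reaches its limit and wakes it again only when the length drops below AR9170_NUM_TX_LIMIT_SOFT (queue depth minus 10). A stand-alone sketch of that stop/wake hysteresis, with a hypothetical depth of 32:

#include <stdbool.h>
#include <stdio.h>

#define TXQ_DEPTH	32			/* hypothetical, for illustration */
#define LIMIT_HARD	TXQ_DEPTH
#define LIMIT_SOFT	(TXQ_DEPTH - 10)	/* mirrors AR9170_NUM_TX_LIMIT_SOFT */

int main(void)
{
	int len = 0;
	bool stopped = false;

	/* a burst of frames fills the queue until the hard limit stops it */
	while (!stopped) {
		len++;
		if (len >= LIMIT_HARD) {
			stopped = true;		/* ieee80211_stop_queue() */
			printf("stopped at len=%d\n", len);
		}
	}

	/* tx completions drain it; wake only below the soft limit */
	while (len > 0) {
		len--;
		if (stopped && len < LIMIT_SOFT) {
			stopped = false;	/* ieee80211_wake_queue() */
			printf("woken at len=%d\n", len);
		}
	}
	return 0;
}

The 10-frame gap between the two thresholds keeps the queue from flapping between stopped and running on every completion.
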
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index e0799d924057..0f361186b78f 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -84,6 +84,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
{ USB_DEVICE(0x0cde, 0x0023) },
/* Z-Com UB82 ABG */
{ USB_DEVICE(0x0cde, 0x0026) },
+ /* Sphairon Homelink 1202 */
+ { USB_DEVICE(0x0cde, 0x0027) },
/* Arcadyan WN7512 */
{ USB_DEVICE(0x083a, 0xf522) },
/* Planex GWUS300 */
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 9e05648356fe..71fc960814f0 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -74,7 +74,6 @@ struct ath_common;
struct ath_bus_ops {
void (*read_cachesize)(struct ath_common *common, int *csz);
- void (*cleanup)(struct ath_common *common);
bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
void (*bt_coex_prep)(struct ath_common *common);
};
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 6a2a96761111..ac67f02e26d8 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -535,13 +535,12 @@ struct ath5k_txq_info {
u32 tqi_cbr_period; /* Constant bit rate period */
u32 tqi_cbr_overflow_limit;
u32 tqi_burst_time;
- u32 tqi_ready_time; /* Not used */
+ u32 tqi_ready_time; /* Time queue waits after an event */
};
/*
* Transmit packet types.
* used on tx control descriptor
- * TODO: Use them inside base.c corectly
*/
enum ath5k_pkt_type {
AR5K_PKT_TYPE_NORMAL = 0,
@@ -1063,6 +1062,7 @@ struct ath5k_hw {
u32 ah_cw_min;
u32 ah_cw_max;
u32 ah_limit_tx_retries;
+ u8 ah_coverage_class;
/* Antenna Control */
u32 ah_ant_ctl[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
@@ -1200,6 +1200,7 @@ extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
/* Protocol Control Unit Functions */
extern int ath5k_hw_set_opmode(struct ath5k_hw *ah);
+extern void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
/* BSSID Functions */
extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
extern void ath5k_hw_set_associd(struct ath5k_hw *ah);
@@ -1231,6 +1232,10 @@ extern int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout);
extern unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah);
extern int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout);
extern unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah);
+/* Clock rate related functions */
+unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
+unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
+unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah);
/* Key table (WEP) functions */
extern int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry);
@@ -1310,24 +1315,6 @@ extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
* Functions used internaly
*/
-/*
- * Translate usec to hw clock units
- * TODO: Half/quarter rate
- */
-static inline unsigned int ath5k_hw_htoclock(unsigned int usec, bool turbo)
-{
- return turbo ? (usec * 80) : (usec * 40);
-}
-
-/*
- * Translate hw clock units to usec
- * TODO: Half/quarter rate
- */
-static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo)
-{
- return turbo ? (clock / 80) : (clock / 40);
-}
-
static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah)
{
return &ah->common;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index e63b7c40d0ee..8dce0077b023 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -83,7 +83,7 @@ MODULE_VERSION("0.6.0 (EXPERIMENTAL)");
/* Known PCI ids */
-static const struct pci_device_id ath5k_pci_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
{ PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
{ PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/
@@ -225,9 +225,9 @@ static int ath5k_reset_wake(struct ath5k_softc *sc);
static int ath5k_start(struct ieee80211_hw *hw);
static void ath5k_stop(struct ieee80211_hw *hw);
static int ath5k_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
static void ath5k_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
static int ath5k_config(struct ieee80211_hw *hw, u32 changed);
static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
int mc_count, struct dev_addr_list *mc_list);
@@ -241,8 +241,6 @@ static int ath5k_set_key(struct ieee80211_hw *hw,
struct ieee80211_key_conf *key);
static int ath5k_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats);
-static int ath5k_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats);
static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf);
static void ath5k_reset_tsf(struct ieee80211_hw *hw);
@@ -254,6 +252,8 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
u32 changes);
static void ath5k_sw_scan_start(struct ieee80211_hw *hw);
static void ath5k_sw_scan_complete(struct ieee80211_hw *hw);
+static void ath5k_set_coverage_class(struct ieee80211_hw *hw,
+ u8 coverage_class);
static const struct ieee80211_ops ath5k_hw_ops = {
.tx = ath5k_tx,
@@ -267,13 +267,13 @@ static const struct ieee80211_ops ath5k_hw_ops = {
.set_key = ath5k_set_key,
.get_stats = ath5k_get_stats,
.conf_tx = NULL,
- .get_tx_stats = ath5k_get_tx_stats,
.get_tsf = ath5k_get_tsf,
.set_tsf = ath5k_set_tsf,
.reset_tsf = ath5k_reset_tsf,
.bss_info_changed = ath5k_bss_info_changed,
.sw_scan_start = ath5k_sw_scan_start,
.sw_scan_complete = ath5k_sw_scan_complete,
+ .set_coverage_class = ath5k_set_coverage_class,
};
/*
@@ -1246,6 +1246,29 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
return 0;
}
+static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ enum ath5k_pkt_type htype;
+ __le16 fc;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+
+ if (ieee80211_is_beacon(fc))
+ htype = AR5K_PKT_TYPE_BEACON;
+ else if (ieee80211_is_probe_resp(fc))
+ htype = AR5K_PKT_TYPE_PROBE_RESP;
+ else if (ieee80211_is_atim(fc))
+ htype = AR5K_PKT_TYPE_ATIM;
+ else if (ieee80211_is_pspoll(fc))
+ htype = AR5K_PKT_TYPE_PSPOLL;
+ else
+ htype = AR5K_PKT_TYPE_NORMAL;
+
+ return htype;
+}
+
static int
ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
struct ath5k_txq *txq)
@@ -1300,7 +1323,8 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
sc->vif, pktlen, info));
}
ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
- ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL,
+ ieee80211_get_hdrlen_from_skb(skb),
+ get_hw_packet_type(skb),
(sc->power_level * 2),
hw_rate,
info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
@@ -1329,7 +1353,6 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
spin_lock_bh(&txq->lock);
list_add_tail(&bf->list, &txq->q);
- sc->tx_stats[txq->qnum].len++;
if (txq->link == NULL) /* is this first packet? */
ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
else /* no, so only link it */
@@ -1513,7 +1536,8 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
if (ret)
- return ret;
+ goto err;
+
if (sc->opmode == NL80211_IFTYPE_AP ||
sc->opmode == NL80211_IFTYPE_MESH_POINT) {
/*
@@ -1540,10 +1564,25 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
if (ret) {
ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
"hardware queue!\n", __func__);
- return ret;
+ goto err;
}
+ ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */
+ if (ret)
+ goto err;
+
+ /* reconfigure cabq with ready time set to 80% of beacon_interval */
+ ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
+ if (ret)
+ goto err;
+
+ qi.tqi_ready_time = (sc->bintval * 80) / 100;
+ ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
+ if (ret)
+ goto err;
- return ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */;
+ ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
+err:
+ return ret;
}
static void
@@ -1562,7 +1601,6 @@ ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
ath5k_txbuf_free(sc, bf);
spin_lock_bh(&sc->txbuflock);
- sc->tx_stats[txq->qnum].len--;
list_move_tail(&bf->list, &sc->txbuf);
sc->txbuf_len++;
spin_unlock_bh(&sc->txbuflock);
@@ -1992,10 +2030,8 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
}
ieee80211_tx_status(sc->hw, skb);
- sc->tx_stats[txq->qnum].count++;
spin_lock(&sc->txbuflock);
- sc->tx_stats[txq->qnum].len--;
list_move_tail(&bf->list, &sc->txbuf);
sc->txbuf_len++;
spin_unlock(&sc->txbuflock);
@@ -2773,7 +2809,7 @@ static void ath5k_stop(struct ieee80211_hw *hw)
}
static int ath5k_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ath5k_softc *sc = hw->priv;
int ret;
@@ -2784,22 +2820,22 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
goto end;
}
- sc->vif = conf->vif;
+ sc->vif = vif;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_MONITOR:
- sc->opmode = conf->type;
+ sc->opmode = vif->type;
break;
default:
ret = -EOPNOTSUPP;
goto end;
}
- ath5k_hw_set_lladdr(sc->ah, conf->mac_addr);
+ ath5k_hw_set_lladdr(sc->ah, vif->addr);
ath5k_mode_setup(sc);
ret = 0;
@@ -2810,13 +2846,13 @@ end:
static void
ath5k_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ath5k_softc *sc = hw->priv;
u8 mac[ETH_ALEN] = {};
mutex_lock(&sc->lock);
- if (sc->vif != conf->vif)
+ if (sc->vif != vif)
goto end;
ath5k_hw_set_lladdr(sc->ah, mac);
@@ -3097,17 +3133,6 @@ ath5k_get_stats(struct ieee80211_hw *hw,
return 0;
}
-static int
-ath5k_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct ath5k_softc *sc = hw->priv;
-
- memcpy(stats, &sc->tx_stats, sizeof(sc->tx_stats));
-
- return 0;
-}
-
static u64
ath5k_get_tsf(struct ieee80211_hw *hw)
{
@@ -3262,3 +3287,22 @@ static void ath5k_sw_scan_complete(struct ieee80211_hw *hw)
ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
AR5K_LED_ASSOC : AR5K_LED_INIT);
}
+
+/**
+ * ath5k_set_coverage_class - Set IEEE 802.11 coverage class
+ *
+ * @hw: struct ieee80211_hw pointer
+ * @coverage_class: IEEE 802.11 coverage class number
+ *
+ * Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for the given
+ * coverage class. The values are persistent; they are restored after a device
+ * reset.
+ */
+static void ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
+{
+ struct ath5k_softc *sc = hw->priv;
+
+ mutex_lock(&sc->lock);
+ ath5k_hw_set_coverage_class(sc->ah, coverage_class);
+ mutex_unlock(&sc->lock);
+}
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 952b3a21bbc3..7e1a88a5abdb 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -117,7 +117,6 @@ struct ath5k_softc {
struct pci_dev *pdev; /* for dma mapping */
void __iomem *iobase; /* address of the device */
struct mutex lock; /* dev-level lock */
- struct ieee80211_tx_queue_stats tx_stats[AR5K_NUM_TX_QUEUES];
struct ieee80211_low_level_stats ll_stats;
struct ieee80211_hw *hw; /* IEEE 802.11 common */
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index 60f547503d75..67aa52e9bf94 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -77,6 +77,8 @@ static const struct pci_device_id ath5k_led_devices[] = {
{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) },
/* HP Compaq C700 (nitrousnrg@gmail.com) */
{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) },
+ /* LiteOn AR5BXB63 (magooz@salug.it) */
+ { ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) },
/* IBM-specific AR5212 (all others) */
{ PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5212_IBM), ATH_LED(0, 0) },
/* Dell Vostro A860 (shahar@shahar-or.co.il) */
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 64fc1eb9b6d9..aefe84f9c04b 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -187,8 +187,8 @@ unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
- AR5K_TIME_OUT), AR5K_TIME_OUT_ACK), ah->ah_turbo);
+ return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
+ AR5K_TIME_OUT), AR5K_TIME_OUT_ACK));
}
/**
@@ -200,12 +200,12 @@ unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
ATH5K_TRACE(ah->ah_sc);
- if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK),
- ah->ah_turbo) <= timeout)
+ if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
+ <= timeout)
return -EINVAL;
AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK,
- ath5k_hw_htoclock(timeout, ah->ah_turbo));
+ ath5k_hw_htoclock(ah, timeout));
return 0;
}
@@ -218,8 +218,8 @@ int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
- AR5K_TIME_OUT), AR5K_TIME_OUT_CTS), ah->ah_turbo);
+ return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
+ AR5K_TIME_OUT), AR5K_TIME_OUT_CTS));
}
/**
@@ -231,17 +231,97 @@ unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
ATH5K_TRACE(ah->ah_sc);
- if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS),
- ah->ah_turbo) <= timeout)
+ if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
+ <= timeout)
return -EINVAL;
AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS,
- ath5k_hw_htoclock(timeout, ah->ah_turbo));
+ ath5k_hw_htoclock(ah, timeout));
return 0;
}
/**
+ * ath5k_hw_htoclock - Translate usec to hw clock units
+ *
+ * @ah: The &struct ath5k_hw
+ * @usec: value in microseconds
+ */
+unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
+{
+ return usec * ath5k_hw_get_clockrate(ah);
+}
+
+/**
+ * ath5k_hw_clocktoh - Translate hw clock units to usec
+ * @clock: value in hw clock units
+ */
+unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
+{
+ return clock / ath5k_hw_get_clockrate(ah);
+}
+
+/**
+ * ath5k_hw_get_clockrate - Get the clock rate for current mode
+ *
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+ int clock;
+
+ if (channel->hw_value & CHANNEL_5GHZ)
+ clock = 40; /* 802.11a */
+ else if (channel->hw_value & CHANNEL_CCK)
+ clock = 22; /* 802.11b */
+ else
+ clock = 44; /* 802.11g */
+
+ /* Clock rate in turbo modes is twice the normal rate */
+ if (channel->hw_value & CHANNEL_TURBO)
+ clock *= 2;
+
+ return clock;
+}
+
+/**
+ * ath5k_hw_get_default_slottime - Get the default slot time for current mode
+ *
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+
+ if (channel->hw_value & CHANNEL_TURBO)
+ return 6; /* both turbo modes */
+
+ if (channel->hw_value & CHANNEL_CCK)
+ return 20; /* 802.11b */
+
+ return 9; /* 802.11 a/g */
+}
+
+/**
+ * ath5k_hw_get_default_sifs - Get the default SIFS for current mode
+ *
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+
+ if (channel->hw_value & CHANNEL_TURBO)
+ return 8; /* both turbo modes */
+
+ if (channel->hw_value & CHANNEL_5GHZ)
+ return 16; /* 802.11a */
+
+ return 10; /* 802.11 b/g */
+}
+
+/**
* ath5k_hw_set_lladdr - Set station id
*
* @ah: The &struct ath5k_hw
@@ -1050,3 +1130,24 @@ int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
return 0;
}
+/**
+ * ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class
+ *
+ * @ah: The &struct ath5k_hw
+ * @coverage_class: IEEE 802.11 coverage class number
+ *
+ * Sets slot time, ACK timeout and CTS timeout for the given coverage class.
+ */
+void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
+{
+ /* As defined by IEEE 802.11-2007 17.3.8.6 */
+ int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
+ int ack_timeout = ath5k_hw_get_default_sifs(ah) + slot_time;
+ int cts_timeout = ack_timeout;
+
+ ath5k_hw_set_slot_time(ah, slot_time);
+ ath5k_hw_set_ack_timeout(ah, ack_timeout);
+ ath5k_hw_set_cts_timeout(ah, cts_timeout);
+
+ ah->ah_coverage_class = coverage_class;
+}
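
A worked instance of the arithmetic that ath5k_hw_set_coverage_class() above applies, assuming the 802.11a defaults returned by the new helpers (9 us slot time, 16 us SIFS); stand-alone C with those defaults hard-coded for illustration:

#include <stdio.h>

int main(void)
{
	const int default_slot = 9;	/* 802.11a/g slot time, us */
	const int default_sifs = 16;	/* 802.11a SIFS, us */
	int coverage_class;

	for (coverage_class = 0; coverage_class <= 3; coverage_class++) {
		int slot_time = default_slot + 3 * coverage_class;
		int ack_timeout = default_sifs + slot_time;	/* cts_timeout is the same */

		printf("class %d: slot %2d us, ack/cts timeout %2d us\n",
		       coverage_class, slot_time, ack_timeout);
	}
	return 0;
}

So coverage class 3 stretches the slot to 18 us and the ACK/CTS timeouts to 34 us, per the IEEE 802.11-2007 17.3.8.6 rule cited in the code.
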
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index eeebb9aef206..9122a8556f45 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -408,12 +408,13 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
break;
case AR5K_TX_QUEUE_CAB:
+ /* XXX: use BCN_SENT_GT, if we can figure out how */
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
- AR5K_QCU_MISC_FRSHED_BCN_SENT_GT |
+ AR5K_QCU_MISC_FRSHED_DBA_GT |
AR5K_QCU_MISC_CBREXP_DIS |
AR5K_QCU_MISC_CBREXP_BCN_DIS);
- ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
+ ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
(AR5K_TUNE_SW_BEACON_RESP -
AR5K_TUNE_DMA_BEACON_RESP) -
AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
@@ -520,12 +521,16 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
*/
unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
{
+ unsigned int slot_time_clock;
+
ATH5K_TRACE(ah->ah_sc);
+
if (ah->ah_version == AR5K_AR5210)
- return ath5k_hw_clocktoh(ath5k_hw_reg_read(ah,
- AR5K_SLOT_TIME) & 0xffff, ah->ah_turbo);
+ slot_time_clock = ath5k_hw_reg_read(ah, AR5K_SLOT_TIME);
else
- return ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT) & 0xffff;
+ slot_time_clock = ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT);
+
+ return ath5k_hw_clocktoh(ah, slot_time_clock & 0xffff);
}
/*
@@ -533,15 +538,17 @@ unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
*/
int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
{
+ u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
+
ATH5K_TRACE(ah->ah_sc);
- if (slot_time < AR5K_SLOT_TIME_9 || slot_time > AR5K_SLOT_TIME_MAX)
+
+ if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
return -EINVAL;
if (ah->ah_version == AR5K_AR5210)
- ath5k_hw_reg_write(ah, ath5k_hw_htoclock(slot_time,
- ah->ah_turbo), AR5K_SLOT_TIME);
+ ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
else
- ath5k_hw_reg_write(ah, slot_time, AR5K_DCU_GBL_IFS_SLOT);
+ ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);
return 0;
}
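
The slot-time accessors above now convert through ath5k_hw_htoclock()/ath5k_hw_clocktoh(), i.e. a plain multiply or divide by the per-mode clock rate; a small stand-alone sketch, with the rates from ath5k_hw_get_clockrate() hard-coded (40 for 802.11a, 22 for 802.11b, 44 for 802.11g, doubled for turbo):

#include <stdio.h>

static unsigned int htoclock(unsigned int usec, unsigned int rate)
{
	return usec * rate;	/* like ath5k_hw_htoclock(): usec -> clock units */
}

static unsigned int clocktoh(unsigned int clock, unsigned int rate)
{
	return clock / rate;	/* like ath5k_hw_clocktoh(): clock units -> usec */
}

int main(void)
{
	unsigned int rate = 44;		/* 802.11g */
	unsigned int slot_clock = htoclock(9, rate);

	printf("9 us slot -> %u clock units -> %u us\n",
	       slot_clock, clocktoh(slot_clock, rate));
	return 0;
}
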
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 62954fc77869..a35a7db0fc4c 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -60,12 +60,11 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
!(channel->hw_value & CHANNEL_OFDM));
/* Get coefficient
- * ALGO: coef = (5 * clock * carrier_freq) / 2)
+ * ALGO: coef = (5 * clock / carrier_freq) / 2
* we scale coef by shifting clock value by 24 for
* better precision since we use integers */
/* TODO: Half/quarter rate */
- clock = ath5k_hw_htoclock(1, channel->hw_value & CHANNEL_TURBO);
-
+ clock = (channel->hw_value & CHANNEL_TURBO) ? 80 : 40;
coef_scaled = ((5 * (clock << 24)) / 2) / channel->center_freq;
/* Get exponent
@@ -1317,6 +1316,10 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
/* Restore antenna mode */
ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
+ /* Restore slot time and ACK timeouts */
+ if (ah->ah_coverage_class > 0)
+ ath5k_hw_set_coverage_class(ah, ah->ah_coverage_class);
+
/*
* Configure QCUs/DCUs
*/
@@ -1371,8 +1374,9 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
* Set clocks to 32KHz operation and use an
* external 32KHz crystal when sleeping if one
* exists */
- if (ah->ah_version == AR5K_AR5212)
- ath5k_hw_set_sleep_clock(ah, true);
+ if (ah->ah_version == AR5K_AR5212 &&
+ ah->ah_op_mode != NL80211_IFTYPE_AP)
+ ath5k_hw_set_sleep_clock(ah, true);
/*
* Disable beacons and reset the register
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 4985b2b1b0a9..6b50d5eb9ec3 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -1,4 +1,6 @@
ath9k-y += beacon.o \
+ gpio.o \
+ init.o \
main.o \
recv.o \
xmit.o \
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 329e6bc137ab..ca4994f13151 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -27,12 +27,6 @@ static void ath_ahb_read_cachesize(struct ath_common *common, int *csz)
*csz = L1_CACHE_BYTES >> 2;
}
-static void ath_ahb_cleanup(struct ath_common *common)
-{
- struct ath_softc *sc = (struct ath_softc *)common->priv;
- iounmap(sc->mem);
-}
-
static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
struct ath_softc *sc = (struct ath_softc *)common->priv;
@@ -54,8 +48,6 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
static struct ath_bus_ops ath_ahb_bus_ops = {
.read_cachesize = ath_ahb_read_cachesize,
- .cleanup = ath_ahb_cleanup,
-
.eeprom_read = ath_ahb_eeprom_read,
};
@@ -121,16 +113,19 @@ static int ath_ahb_probe(struct platform_device *pdev)
sc->mem = mem;
sc->irq = irq;
- ret = ath_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops);
+ /* Will be cleared in ath9k_start() */
+ sc->sc_flags |= SC_OP_INVALID;
+
+ ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
- dev_err(&pdev->dev, "failed to initialize device\n");
+ dev_err(&pdev->dev, "request_irq failed\n");
goto err_free_hw;
}
- ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
+ ret = ath9k_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops);
if (ret) {
- dev_err(&pdev->dev, "request_irq failed\n");
- goto err_detach;
+ dev_err(&pdev->dev, "failed to initialize device\n");
+ goto err_irq;
}
ah = sc->sc_ah;
@@ -143,8 +138,8 @@ static int ath_ahb_probe(struct platform_device *pdev)
return 0;
- err_detach:
- ath_detach(sc);
+ err_irq:
+ free_irq(irq, sc);
err_free_hw:
ieee80211_free_hw(hw);
platform_set_drvdata(pdev, NULL);
@@ -161,8 +156,12 @@ static int ath_ahb_remove(struct platform_device *pdev)
if (hw) {
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ void __iomem *mem = sc->mem;
- ath_cleanup(sc);
+ ath9k_deinit_device(sc);
+ free_irq(sc->irq, sc);
+ ieee80211_free_hw(sc->hw);
+ iounmap(mem);
platform_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 1597a42731ed..83c7ea4c007f 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -267,6 +267,7 @@ void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
u16 tid, u16 *ssn);
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
+void ath9k_enable_ps(struct ath_softc *sc);
/********/
/* VIFs */
@@ -341,6 +342,12 @@ int ath_beaconq_config(struct ath_softc *sc);
#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
+void ath_ani_calibrate(unsigned long data);
+
+/**********/
+/* BTCOEX */
+/**********/
+
/* Defines the BT AR_BT_COEX_WGHT used */
enum ath_stomp_type {
ATH_BTCOEX_NO_STOMP,
@@ -358,9 +365,14 @@ struct ath_btcoex {
int bt_stomp_type; /* Types of BT stomping */
u32 btcoex_no_stomp; /* in usec */
u32 btcoex_period; /* in usec */
+ u32 btscan_no_stomp; /* in usec */
struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
};
+int ath_init_btcoex_timer(struct ath_softc *sc);
+void ath9k_btcoex_timer_resume(struct ath_softc *sc);
+void ath9k_btcoex_timer_pause(struct ath_softc *sc);
+
/********************/
/* LED Control */
/********************/
@@ -385,6 +397,9 @@ struct ath_led {
bool registered;
};
+void ath_init_leds(struct ath_softc *sc);
+void ath_deinit_leds(struct ath_softc *sc);
+
/********************/
/* Main driver core */
/********************/
@@ -403,26 +418,29 @@ struct ath_led {
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
#define ATH_RATE_DUMMY_MARKER 0
-#define SC_OP_INVALID BIT(0)
-#define SC_OP_BEACONS BIT(1)
-#define SC_OP_RXAGGR BIT(2)
-#define SC_OP_TXAGGR BIT(3)
-#define SC_OP_FULL_RESET BIT(4)
-#define SC_OP_PREAMBLE_SHORT BIT(5)
-#define SC_OP_PROTECT_ENABLE BIT(6)
-#define SC_OP_RXFLUSH BIT(7)
-#define SC_OP_LED_ASSOCIATED BIT(8)
-#define SC_OP_WAIT_FOR_BEACON BIT(12)
-#define SC_OP_LED_ON BIT(13)
-#define SC_OP_SCANNING BIT(14)
-#define SC_OP_TSF_RESET BIT(15)
-#define SC_OP_WAIT_FOR_CAB BIT(16)
-#define SC_OP_WAIT_FOR_PSPOLL_DATA BIT(17)
-#define SC_OP_WAIT_FOR_TX_ACK BIT(18)
-#define SC_OP_BEACON_SYNC BIT(19)
-#define SC_OP_BT_PRIORITY_DETECTED BIT(21)
-#define SC_OP_NULLFUNC_COMPLETED BIT(22)
-#define SC_OP_PS_ENABLED BIT(23)
+#define SC_OP_INVALID BIT(0)
+#define SC_OP_BEACONS BIT(1)
+#define SC_OP_RXAGGR BIT(2)
+#define SC_OP_TXAGGR BIT(3)
+#define SC_OP_FULL_RESET BIT(4)
+#define SC_OP_PREAMBLE_SHORT BIT(5)
+#define SC_OP_PROTECT_ENABLE BIT(6)
+#define SC_OP_RXFLUSH BIT(7)
+#define SC_OP_LED_ASSOCIATED BIT(8)
+#define SC_OP_LED_ON BIT(9)
+#define SC_OP_SCANNING BIT(10)
+#define SC_OP_TSF_RESET BIT(11)
+#define SC_OP_BT_PRIORITY_DETECTED BIT(12)
+#define SC_OP_BT_SCAN BIT(13)
+
+/* Powersave flags */
+#define PS_WAIT_FOR_BEACON BIT(0)
+#define PS_WAIT_FOR_CAB BIT(1)
+#define PS_WAIT_FOR_PSPOLL_DATA BIT(2)
+#define PS_WAIT_FOR_TX_ACK BIT(3)
+#define PS_BEACON_SYNC BIT(4)
+#define PS_NULLFUNC_COMPLETED BIT(5)
+#define PS_ENABLED BIT(6)
struct ath_wiphy;
struct ath_rate_table;
@@ -453,16 +471,17 @@ struct ath_softc {
int irq;
spinlock_t sc_resetlock;
spinlock_t sc_serial_rw;
- spinlock_t ani_lock;
spinlock_t sc_pm_lock;
struct mutex mutex;
u32 intrstatus;
u32 sc_flags; /* SC_OP_* */
+ u16 ps_flags; /* PS_* */
u16 curtxpow;
u8 nbcnvifs;
u16 nvifs;
bool ps_enabled;
+ bool ps_idle;
unsigned long ps_usecount;
enum ath9k_int imask;
@@ -509,6 +528,7 @@ struct ath_wiphy {
int chan_is_ht;
};
+void ath9k_tasklet(unsigned long data);
int ath_reset(struct ath_softc *sc, bool retry_tx);
int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
@@ -519,21 +539,16 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
common->bus_ops->read_cachesize(common, csz);
}
-static inline void ath_bus_cleanup(struct ath_common *common)
-{
- common->bus_ops->cleanup(common);
-}
-
extern struct ieee80211_ops ath9k_ops;
+extern int modparam_nohwcrypt;
irqreturn_t ath_isr(int irq, void *dev);
-void ath_cleanup(struct ath_softc *sc);
-int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
+int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
const struct ath_bus_ops *bus_ops);
-void ath_detach(struct ath_softc *sc);
+void ath9k_deinit_device(struct ath_softc *sc);
const char *ath_mac_bb_name(u32 mac_bb_version);
const char *ath_rf_name(u16 rf_version);
-void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
+void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
struct ath9k_channel *ichan);
void ath_update_chainmask(struct ath_softc *sc, int is_ht);
@@ -542,6 +557,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
+bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode);
#ifdef CONFIG_PCI
int ath_pci_init(void);
@@ -583,4 +599,8 @@ void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
+
+void ath_start_rfkill_poll(struct ath_softc *sc);
+extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+
#endif /* ATH9K_H */
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 1660ef17aaf5..d088ebfe63a6 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -480,7 +480,8 @@ void ath_beacon_tasklet(unsigned long data)
sc->beacon.updateslot = COMMIT; /* commit next beacon */
sc->beacon.slotupdate = slot;
} else if (sc->beacon.updateslot == COMMIT && sc->beacon.slotupdate == slot) {
- ath9k_hw_setslottime(sc->sc_ah, sc->beacon.slottime);
+ ah->slottime = sc->beacon.slottime;
+ ath9k_hw_init_global_settings(ah);
sc->beacon.updateslot = OK;
}
if (bfaddr != 0) {
@@ -576,6 +577,13 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
u64 tsf;
int num_beacons, offset, dtim_dec_count, cfp_dec_count;
+ /* No need to configure beacon if we are not associated */
+ if (!common->curaid) {
+ ath_print(common, ATH_DBG_BEACON,
+ "STA is not yet associated..skipping beacon config\n");
+ return;
+ }
+
memset(&bs, 0, sizeof(bs));
intval = conf->beacon_interval & ATH9K_BEACON_PERIOD;
@@ -738,7 +746,6 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
enum nl80211_iftype iftype;
/* Setup the beacon configuration parameters */
-
if (vif) {
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 1ba31a73317c..1ee5a15ccbb1 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -25,10 +25,12 @@
#define ATH_BTCOEX_DEF_BT_PERIOD 45
#define ATH_BTCOEX_DEF_DUTY_CYCLE 55
+#define ATH_BTCOEX_BTSCAN_DUTY_CYCLE 90
#define ATH_BTCOEX_BMISS_THRESH 50
#define ATH_BT_PRIORITY_TIME_THRESHOLD 1000 /* ms */
#define ATH_BT_CNT_THRESHOLD 3
+#define ATH_BT_CNT_SCAN_THRESHOLD 15
enum ath_btcoex_scheme {
ATH_BTCOEX_CFG_NONE,
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index b66f72dbf7b9..42d2a506845a 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -75,17 +75,24 @@ static const struct file_operations fops_debug = {
#endif
+#define DMA_BUF_LEN 1024
+
static ssize_t read_file_dma(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
struct ath_hw *ah = sc->sc_ah;
- char buf[1024];
+ char *buf;
+ int retval;
unsigned int len = 0;
u32 val[ATH9K_NUM_DMA_DEBUG_REGS];
int i, qcuOffset = 0, dcuOffset = 0;
u32 *qcuBase = &val[0], *dcuBase = &val[4];
+ buf = kmalloc(DMA_BUF_LEN, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
ath9k_ps_wakeup(sc);
REG_WRITE_D(ah, AR_MACMISC,
@@ -93,20 +100,20 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
(AR_MACMISC_MISC_OBS_BUS_1 <<
AR_MACMISC_MISC_OBS_BUS_MSB_S)));
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"Raw DMA Debug values:\n");
for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
if (i % 4 == 0)
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32)));
- len += snprintf(buf + len, sizeof(buf) - len, "%d: %08x ",
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
i, val[i]);
}
- len += snprintf(buf + len, sizeof(buf) - len, "\n\n");
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) {
@@ -120,7 +127,7 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
dcuBase++;
}
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"%2d %2x %1x %2x %2x\n",
i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
(*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
@@ -128,35 +135,37 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
(*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
}
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"qcu_stitch state: %2x qcu_fetch state: %2x\n",
(val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"qcu_complete state: %2x dcu_complete state: %2x\n",
(val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"dcu_arb state: %2x dcu_fp state: %2x\n",
(val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
(val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
(val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
(val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
- len += snprintf(buf + len, sizeof(buf) - len, "pcu observe: 0x%x \n",
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x \n",
REG_READ_D(ah, AR_OBS_BUS_1));
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"AR_CR: 0x%x \n", REG_READ_D(ah, AR_CR));
ath9k_ps_restore(sc);
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return retval;
}
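The change above swaps the 1 KB on-stack buffer for a kmalloc() allocation while keeping the usual debugfs read pattern. A minimal sketch of that pattern (illustration only; example_read() and its contents are hypothetical, and it mirrors the patch's choice of returning 0 on allocation failure):

	static ssize_t example_read(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
	{
		char *buf;
		unsigned int len = 0;
		ssize_t ret;

		buf = kmalloc(DMA_BUF_LEN, GFP_KERNEL);	/* heap, not stack */
		if (!buf)
			return 0;

		/* every snprintf() is bounded by the space remaining in buf */
		len += snprintf(buf + len, DMA_BUF_LEN - len, "example: %u\n", 42U);

		ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
		kfree(buf);
		return ret;
	}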
static const struct file_operations fops_dma = {
@@ -289,23 +298,49 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
if (sc->cur_rate_table == NULL)
return 0;
- max = 80 + sc->cur_rate_table->rate_cnt * 64;
+ max = 80 + sc->cur_rate_table->rate_cnt * 1024;
buf = kmalloc(max + 1, GFP_KERNEL);
if (buf == NULL)
return 0;
buf[max] = 0;
- len += sprintf(buf, "%5s %15s %8s %9s %3s\n\n", "Rate", "Success",
- "Retries", "XRetries", "PER");
+ len += sprintf(buf, "%6s %6s %6s "
+ "%10s %10s %10s %10s\n",
+ "HT", "MCS", "Rate",
+ "Success", "Retries", "XRetries", "PER");
for (i = 0; i < sc->cur_rate_table->rate_cnt; i++) {
u32 ratekbps = sc->cur_rate_table->info[i].ratekbps;
struct ath_rc_stats *stats = &sc->debug.stats.rcstats[i];
+ char mcs[5];
+ char htmode[5];
+ int used_mcs = 0, used_htmode = 0;
+
+ if (WLAN_RC_PHY_HT(sc->cur_rate_table->info[i].phy)) {
+ used_mcs = snprintf(mcs, 5, "%d",
+ sc->cur_rate_table->info[i].ratecode);
+
+ if (WLAN_RC_PHY_40(sc->cur_rate_table->info[i].phy))
+ used_htmode = snprintf(htmode, 5, "HT40");
+ else if (WLAN_RC_PHY_20(sc->cur_rate_table->info[i].phy))
+ used_htmode = snprintf(htmode, 5, "HT20");
+ else
+ used_htmode = snprintf(htmode, 5, "????");
+ }
+
+ mcs[used_mcs] = '\0';
+ htmode[used_htmode] = '\0';
len += snprintf(buf + len, max - len,
- "%3u.%d: %8u %8u %8u %8u\n", ratekbps / 1000,
- (ratekbps % 1000) / 100, stats->success,
- stats->retries, stats->xretries,
+ "%6s %6s %3u.%d: "
+ "%10u %10u %10u %10u\n",
+ htmode,
+ mcs,
+ ratekbps / 1000,
+ (ratekbps % 1000) / 100,
+ stats->success,
+ stats->retries,
+ stats->xretries,
stats->per);
}
@@ -554,6 +589,116 @@ static const struct file_operations fops_xmit = {
.owner = THIS_MODULE
};
+static ssize_t read_file_recv(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+#define PHY_ERR(s, p) \
+ len += snprintf(buf + len, size - len, "%18s : %10u\n", s, \
+ sc->debug.stats.rxstats.phy_err_stats[p]);
+
+ struct ath_softc *sc = file->private_data;
+ char *buf;
+ unsigned int len = 0, size = 1152;
+ ssize_t retval = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return 0;
+
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "CRC ERR",
+ sc->debug.stats.rxstats.crc_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "DECRYPT CRC ERR",
+ sc->debug.stats.rxstats.decrypt_crc_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "PHY ERR",
+ sc->debug.stats.rxstats.phy_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "MIC ERR",
+ sc->debug.stats.rxstats.mic_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "PRE-DELIM CRC ERR",
+ sc->debug.stats.rxstats.pre_delim_crc_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "POST-DELIM CRC ERR",
+ sc->debug.stats.rxstats.post_delim_crc_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "DECRYPT BUSY ERR",
+ sc->debug.stats.rxstats.decrypt_busy_err);
+
+ PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
+ PHY_ERR("TIMING", ATH9K_PHYERR_TIMING);
+ PHY_ERR("PARITY", ATH9K_PHYERR_PARITY);
+ PHY_ERR("RATE", ATH9K_PHYERR_RATE);
+ PHY_ERR("LENGTH", ATH9K_PHYERR_LENGTH);
+ PHY_ERR("RADAR", ATH9K_PHYERR_RADAR);
+ PHY_ERR("SERVICE", ATH9K_PHYERR_SERVICE);
+ PHY_ERR("TOR", ATH9K_PHYERR_TOR);
+ PHY_ERR("OFDM-TIMING", ATH9K_PHYERR_OFDM_TIMING);
+ PHY_ERR("OFDM-SIGNAL-PARITY", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
+ PHY_ERR("OFDM-RATE", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
+ PHY_ERR("OFDM-LENGTH", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
+ PHY_ERR("OFDM-POWER-DROP", ATH9K_PHYERR_OFDM_POWER_DROP);
+ PHY_ERR("OFDM-SERVICE", ATH9K_PHYERR_OFDM_SERVICE);
+ PHY_ERR("OFDM-RESTART", ATH9K_PHYERR_OFDM_RESTART);
+ PHY_ERR("FALSE-RADAR-EXT", ATH9K_PHYERR_FALSE_RADAR_EXT);
+ PHY_ERR("CCK-TIMING", ATH9K_PHYERR_CCK_TIMING);
+ PHY_ERR("CCK-HEADER-CRC", ATH9K_PHYERR_CCK_HEADER_CRC);
+ PHY_ERR("CCK-RATE", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
+ PHY_ERR("CCK-SERVICE", ATH9K_PHYERR_CCK_SERVICE);
+ PHY_ERR("CCK-RESTART", ATH9K_PHYERR_CCK_RESTART);
+ PHY_ERR("CCK-LENGTH", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
+ PHY_ERR("CCK-POWER-DROP", ATH9K_PHYERR_CCK_POWER_DROP);
+ PHY_ERR("HT-CRC", ATH9K_PHYERR_HT_CRC_ERROR);
+ PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
+ PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL);
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+
+#undef PHY_ERR
+}
+
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf)
+{
+#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
+#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
+
+ struct ath_desc *ds = bf->bf_desc;
+ u32 phyerr;
+
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
+ RX_STAT_INC(crc_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT)
+ RX_STAT_INC(decrypt_crc_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC)
+ RX_STAT_INC(mic_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_PRE)
+ RX_STAT_INC(pre_delim_crc_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_POST)
+ RX_STAT_INC(post_delim_crc_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RX_DECRYPT_BUSY)
+ RX_STAT_INC(decrypt_busy_err);
+
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
+ RX_STAT_INC(phy_err);
+ phyerr = ds->ds_rxstat.rs_phyerr & 0x24;
+ RX_PHY_ERR_INC(phyerr);
+ }
+
+#undef RX_STAT_INC
+#undef RX_PHY_ERR_INC
+}
+
+static const struct file_operations fops_recv = {
+ .read = read_file_recv,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -606,6 +751,13 @@ int ath9k_init_debug(struct ath_hw *ah)
if (!sc->debug.debugfs_xmit)
goto err;
+ sc->debug.debugfs_recv = debugfs_create_file("recv",
+ S_IRUSR,
+ sc->debug.debugfs_phy,
+ sc, &fops_recv);
+ if (!sc->debug.debugfs_recv)
+ goto err;
+
return 0;
err:
ath9k_exit_debug(ah);
@@ -617,6 +769,7 @@ void ath9k_exit_debug(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
struct ath_softc *sc = (struct ath_softc *) common->priv;
+ debugfs_remove(sc->debug.debugfs_recv);
debugfs_remove(sc->debug.debugfs_xmit);
debugfs_remove(sc->debug.debugfs_wiphy);
debugfs_remove(sc->debug.debugfs_rcstat);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 536663e3ee11..86780e68b31e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -116,10 +116,35 @@ struct ath_tx_stats {
u32 delim_underrun;
};
+/**
+ * struct ath_rx_stats - RX Statistics
+ * @crc_err: No. of frames with incorrect CRC value
+ * @decrypt_crc_err: No. of frames whose CRC check failed after
+ *     decryption process completed
+ * @phy_err: No. of frames whose reception failed because the PHY
+ *     encountered an error
+ * @mic_err: No. of frames that failed TKIP MIC verification
+ * @pre_delim_crc_err: Pre-Frame delimiter CRC error detections
+ * @post_delim_crc_err: Post-Frame delimiter CRC error detections
+ * @decrypt_busy_err: Decryption interruptions counter
+ * @phy_err_stats: Individual PHY error statistics
+ */
+struct ath_rx_stats {
+ u32 crc_err;
+ u32 decrypt_crc_err;
+ u32 phy_err;
+ u32 mic_err;
+ u32 pre_delim_crc_err;
+ u32 post_delim_crc_err;
+ u32 decrypt_busy_err;
+ u32 phy_err_stats[ATH9K_PHYERR_MAX];
+};
+
struct ath_stats {
struct ath_interrupt_stats istats;
struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
+ struct ath_rx_stats rxstats;
};
struct ath9k_debug {
@@ -130,6 +155,7 @@ struct ath9k_debug {
struct dentry *debugfs_rcstat;
struct dentry *debugfs_wiphy;
struct dentry *debugfs_xmit;
+ struct dentry *debugfs_recv;
struct ath_stats stats;
};
@@ -142,6 +168,7 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
void ath_debug_stat_rc(struct ath_softc *sc, int final_rate);
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf);
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf);
void ath_debug_stat_retries(struct ath_softc *sc, int rix,
int xretries, int retries, u8 per);
@@ -181,6 +208,11 @@ static inline void ath_debug_stat_tx(struct ath_softc *sc,
{
}
+static inline void ath_debug_stat_rx(struct ath_softc *sc,
+ struct ath_buf *bf)
+{
+}
+
static inline void ath_debug_stat_retries(struct ath_softc *sc, int rix,
int xretries, int retries, u8 per)
{
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
new file mode 100644
index 000000000000..deab8beb0680
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+/********************************/
+/* LED functions */
+/********************************/
+
+static void ath_led_blink_work(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc,
+ ath_led_blink_work.work);
+
+ if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
+ return;
+
+ if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
+ (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
+ else
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
+ (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
+
+ ieee80211_queue_delayed_work(sc->hw,
+ &sc->ath_led_blink_work,
+ (sc->sc_flags & SC_OP_LED_ON) ?
+ msecs_to_jiffies(sc->led_off_duration) :
+ msecs_to_jiffies(sc->led_on_duration));
+
+ sc->led_on_duration = sc->led_on_cnt ?
+ max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
+ ATH_LED_ON_DURATION_IDLE;
+ sc->led_off_duration = sc->led_off_cnt ?
+ max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
+ ATH_LED_OFF_DURATION_IDLE;
+ sc->led_on_cnt = sc->led_off_cnt = 0;
+ if (sc->sc_flags & SC_OP_LED_ON)
+ sc->sc_flags &= ~SC_OP_LED_ON;
+ else
+ sc->sc_flags |= SC_OP_LED_ON;
+}
+
+static void ath_led_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
+ struct ath_softc *sc = led->sc;
+
+ switch (brightness) {
+ case LED_OFF:
+ if (led->led_type == ATH_LED_ASSOC ||
+ led->led_type == ATH_LED_RADIO) {
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
+ (led->led_type == ATH_LED_RADIO));
+ sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
+ if (led->led_type == ATH_LED_RADIO)
+ sc->sc_flags &= ~SC_OP_LED_ON;
+ } else {
+ sc->led_off_cnt++;
+ }
+ break;
+ case LED_FULL:
+ if (led->led_type == ATH_LED_ASSOC) {
+ sc->sc_flags |= SC_OP_LED_ASSOCIATED;
+ ieee80211_queue_delayed_work(sc->hw,
+ &sc->ath_led_blink_work, 0);
+ } else if (led->led_type == ATH_LED_RADIO) {
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
+ sc->sc_flags |= SC_OP_LED_ON;
+ } else {
+ sc->led_on_cnt++;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
+ char *trigger)
+{
+ int ret;
+
+ led->sc = sc;
+ led->led_cdev.name = led->name;
+ led->led_cdev.default_trigger = trigger;
+ led->led_cdev.brightness_set = ath_led_brightness;
+
+ ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
+ if (ret)
+ ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
+ "Failed to register led:%s", led->name);
+ else
+ led->registered = 1;
+ return ret;
+}
+
+static void ath_unregister_led(struct ath_led *led)
+{
+ if (led->registered) {
+ led_classdev_unregister(&led->led_cdev);
+ led->registered = 0;
+ }
+}
+
+void ath_deinit_leds(struct ath_softc *sc)
+{
+ ath_unregister_led(&sc->assoc_led);
+ sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
+ ath_unregister_led(&sc->tx_led);
+ ath_unregister_led(&sc->rx_led);
+ ath_unregister_led(&sc->radio_led);
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
+}
+
+void ath_init_leds(struct ath_softc *sc)
+{
+ char *trigger;
+ int ret;
+
+ if (AR_SREV_9287(sc->sc_ah))
+ sc->sc_ah->led_pin = ATH_LED_PIN_9287;
+ else
+ sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
+
+ /* Configure the LED GPIO pin for output */
+ ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ /* LED off, active low */
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
+
+ INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
+
+ trigger = ieee80211_get_radio_led_name(sc->hw);
+ snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
+ "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
+ ret = ath_register_led(sc, &sc->radio_led, trigger);
+ sc->radio_led.led_type = ATH_LED_RADIO;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_assoc_led_name(sc->hw);
+ snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
+ "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
+ ret = ath_register_led(sc, &sc->assoc_led, trigger);
+ sc->assoc_led.led_type = ATH_LED_ASSOC;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_tx_led_name(sc->hw);
+ snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
+ "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
+ ret = ath_register_led(sc, &sc->tx_led, trigger);
+ sc->tx_led.led_type = ATH_LED_TX;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_rx_led_name(sc->hw);
+ snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
+ "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
+ ret = ath_register_led(sc, &sc->rx_led, trigger);
+ sc->rx_led.led_type = ATH_LED_RX;
+ if (ret)
+ goto fail;
+
+ return;
+
+fail:
+ cancel_delayed_work_sync(&sc->ath_led_blink_work);
+ ath_deinit_leds(sc);
+}
+
+/*******************/
+/* Rfkill */
+/*******************/
+
+static bool ath_is_rfkill_set(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
+ ah->rfkill_polarity;
+}
+
+void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
+{
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+ bool blocked = !!ath_is_rfkill_set(sc);
+
+ wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+}
+
+void ath_start_rfkill_poll(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+ wiphy_rfkill_start_polling(sc->hw->wiphy);
+}
+
+/******************/
+/* BTCOEX */
+/******************/
+
+/*
+ * Detects if there is any priority bt traffic
+ */
+static void ath_detect_bt_priority(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
+ btcoex->bt_priority_cnt++;
+
+ if (time_after(jiffies, btcoex->bt_priority_time +
+ msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
+ sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
+ /* Detect if colocated bt started scanning */
+ if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
+ ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
+ "BT scan detected");
+ sc->sc_flags |= (SC_OP_BT_SCAN |
+ SC_OP_BT_PRIORITY_DETECTED);
+ } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
+ ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
+ "BT priority traffic detected");
+ sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
+ }
+
+ btcoex->bt_priority_cnt = 0;
+ btcoex->bt_priority_time = jiffies;
+ }
+}
+
+/*
+ * Configures appropriate weight based on stomp type.
+ */
+static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
+ enum ath_stomp_type stomp_type)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ switch (stomp_type) {
+ case ATH_BTCOEX_STOMP_ALL:
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_ALL_WLAN_WGHT);
+ break;
+ case ATH_BTCOEX_STOMP_LOW:
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_LOW_WLAN_WGHT);
+ break;
+ case ATH_BTCOEX_STOMP_NONE:
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_NONE_WLAN_WGHT);
+ break;
+ default:
+ ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ "Invalid Stomptype\n");
+ break;
+ }
+
+ ath9k_hw_btcoex_enable(ah);
+}
+
+static void ath9k_gen_timer_start(struct ath_hw *ah,
+ struct ath_gen_timer *timer,
+ u32 timer_next,
+ u32 timer_period)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+
+ ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
+
+ if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
+ ath9k_hw_set_interrupts(ah, 0);
+ sc->imask |= ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah, sc->imask);
+ }
+}
+
+static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
+
+ ath9k_hw_gen_timer_stop(ah, timer);
+
+ /* if no timer is enabled, turn off interrupt mask */
+ if (timer_table->timer_mask.val == 0) {
+ ath9k_hw_set_interrupts(ah, 0);
+ sc->imask &= ~ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah, sc->imask);
+ }
+}
+
+/*
+ * This is the master bt coex timer which runs every
+ * 45 ms; bt traffic is given priority during 55% of this
+ * period, while wlan gets the remaining 45%.
+ */
+static void ath_btcoex_period_timer(unsigned long data)
+{
+ struct ath_softc *sc = (struct ath_softc *) data;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ u32 timer_period;
+ bool is_btscan;
+
+ ath_detect_bt_priority(sc);
+
+ is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
+
+ spin_lock_bh(&btcoex->btcoex_lock);
+
+ ath9k_btcoex_bt_stomp(sc, is_btscan ? ATH_BTCOEX_STOMP_ALL :
+ btcoex->bt_stomp_type);
+
+ spin_unlock_bh(&btcoex->btcoex_lock);
+
+ if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
+ if (btcoex->hw_timer_enabled)
+ ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
+
+ timer_period = is_btscan ? btcoex->btscan_no_stomp :
+ btcoex->btcoex_no_stomp;
+ ath9k_gen_timer_start(ah,
+ btcoex->no_stomp_timer,
+ (ath9k_hw_gettsf32(ah) +
+ timer_period), timer_period * 10);
+ btcoex->hw_timer_enabled = true;
+ }
+
+ mod_timer(&btcoex->period_timer, jiffies +
+ msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
+}
+
+/*
+ * Generic tsf based hw timer which configures weight
+ * registers to time slice between wlan and bt traffic
+ */
+static void ath_btcoex_no_stomp_timer(void *arg)
+{
+ struct ath_softc *sc = (struct ath_softc *)arg;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ "no stomp timer running \n");
+
+ spin_lock_bh(&btcoex->btcoex_lock);
+
+ if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
+ ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
+ else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
+ ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);
+
+ spin_unlock_bh(&btcoex->btcoex_lock);
+}
+
+int ath_init_btcoex_timer(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+
+ btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
+ btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
+ btcoex->btcoex_period / 100;
+ btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
+ btcoex->btcoex_period / 100;
+
+ setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
+ (unsigned long) sc);
+
+ spin_lock_init(&btcoex->btcoex_lock);
+
+ btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
+ ath_btcoex_no_stomp_timer,
+ ath_btcoex_no_stomp_timer,
+ (void *) sc, AR_FIRST_NDP_TIMER);
+
+ if (!btcoex->no_stomp_timer)
+ return -ENOMEM;
+
+ return 0;
+}
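With the defaults from btcoex.h above, this works out to (illustration only): btcoex_period = 45 * 1000 = 45000 us, btcoex_no_stomp = (100 - 55) * 45000 / 100 = 20250 us and btscan_no_stomp = (100 - 90) * 45000 / 100 = 4500 us, i.e. WLAN normally gets 45% of each 45 ms period without BT stomping, but only 10% of it while a BT scan is detected.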
+
+/*
+ * (Re)start btcoex timers
+ */
+void ath9k_btcoex_timer_resume(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_hw *ah = sc->sc_ah;
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ "Starting btcoex timers");
+
+ /* make sure duty cycle timer is also stopped when resuming */
+ if (btcoex->hw_timer_enabled)
+ ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
+
+ btcoex->bt_priority_cnt = 0;
+ btcoex->bt_priority_time = jiffies;
+ sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
+
+ mod_timer(&btcoex->period_timer, jiffies);
+}
+
+
+/*
+ * Pause btcoex timer and bt duty cycle timer
+ */
+void ath9k_btcoex_timer_pause(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_hw *ah = sc->sc_ah;
+
+ del_timer_sync(&btcoex->period_timer);
+
+ if (btcoex->hw_timer_enabled)
+ ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
+
+ btcoex->hw_timer_enabled = false;
+}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index ae371448b5a0..f00f5c744f48 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -52,28 +52,6 @@ module_exit(ath9k_exit);
/* Helper Functions */
/********************/
-static u32 ath9k_hw_mac_usec(struct ath_hw *ah, u32 clks)
-{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
-
- if (!ah->curchan) /* should really check for CCK instead */
- return clks / ATH9K_CLOCK_RATE_CCK;
- if (conf->channel->band == IEEE80211_BAND_2GHZ)
- return clks / ATH9K_CLOCK_RATE_2GHZ_OFDM;
-
- return clks / ATH9K_CLOCK_RATE_5GHZ_OFDM;
-}
-
-static u32 ath9k_hw_mac_to_usec(struct ath_hw *ah, u32 clks)
-{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
-
- if (conf_is_ht40(conf))
- return ath9k_hw_mac_usec(ah, clks) / 2;
- else
- return ath9k_hw_mac_usec(ah, clks);
-}
-
static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
{
struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
@@ -343,30 +321,6 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
return true;
}
-static const char *ath9k_hw_devname(u16 devid)
-{
- switch (devid) {
- case AR5416_DEVID_PCI:
- return "Atheros 5416";
- case AR5416_DEVID_PCIE:
- return "Atheros 5418";
- case AR9160_DEVID_PCI:
- return "Atheros 9160";
- case AR5416_AR9100_DEVID:
- return "Atheros 9100";
- case AR9280_DEVID_PCI:
- case AR9280_DEVID_PCIE:
- return "Atheros 9280";
- case AR9285_DEVID_PCIE:
- return "Atheros 9285";
- case AR5416_DEVID_AR9287_PCI:
- case AR5416_DEVID_AR9287_PCIE:
- return "Atheros 9287";
- }
-
- return NULL;
-}
-
static void ath9k_hw_init_config(struct ath_hw *ah)
{
int i;
@@ -380,7 +334,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.pcie_clock_req = 0;
ah->config.pcie_waen = 0;
ah->config.analog_shiftreg = 1;
- ah->config.ht_enable = 1;
ah->config.ofdm_trig_low = 200;
ah->config.ofdm_trig_high = 500;
ah->config.cck_trig_high = 200;
@@ -392,7 +345,12 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.spurchans[i][1] = AR_NO_SPUR;
}
- ah->config.intr_mitigation = true;
+ if (ah->hw_version.devid != AR2427_DEVID_PCIE)
+ ah->config.ht_enable = 1;
+ else
+ ah->config.ht_enable = 0;
+
+ ah->config.rx_intr_mitigation = true;
/*
* We need this for PCI devices only (Cardbus, PCI, miniPCI)
@@ -437,8 +395,6 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->beacon_interval = 100;
ah->enable_32kHz_clock = DONT_USE_32KHZ;
ah->slottime = (u32) -1;
- ah->acktimeout = (u32) -1;
- ah->ctstimeout = (u32) -1;
ah->globaltxtimeout = (u32) -1;
ah->power_mode = ATH9K_PM_UNDEFINED;
}
@@ -590,6 +546,7 @@ static bool ath9k_hw_devid_supported(u16 devid)
case AR5416_DEVID_AR9287_PCI:
case AR5416_DEVID_AR9287_PCIE:
case AR9271_USB:
+ case AR2427_DEVID_PCIE:
return true;
default:
break;
@@ -1183,7 +1140,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
AR_IMR_RXORN |
AR_IMR_BCNMISC;
- if (ah->config.intr_mitigation)
+ if (ah->config.rx_intr_mitigation)
ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
else
ah->mask_reg |= AR_IMR_RXOK;
@@ -1203,34 +1160,25 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
}
}
-static bool ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
+static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
{
- if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
- "bad ack timeout %u\n", us);
- ah->acktimeout = (u32) -1;
- return false;
- } else {
- REG_RMW_FIELD(ah, AR_TIME_OUT,
- AR_TIME_OUT_ACK, ath9k_hw_mac_to_clks(ah, us));
- ah->acktimeout = us;
- return true;
- }
+ u32 val = ath9k_hw_mac_to_clks(ah, us);
+ val = min(val, (u32) 0xFFFF);
+ REG_WRITE(ah, AR_D_GBL_IFS_SLOT, val);
}
-static bool ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
+static void ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
{
- if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
- "bad cts timeout %u\n", us);
- ah->ctstimeout = (u32) -1;
- return false;
- } else {
- REG_RMW_FIELD(ah, AR_TIME_OUT,
- AR_TIME_OUT_CTS, ath9k_hw_mac_to_clks(ah, us));
- ah->ctstimeout = us;
- return true;
- }
+ u32 val = ath9k_hw_mac_to_clks(ah, us);
+ val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_ACK));
+ REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_ACK, val);
+}
+
+static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
+{
+ u32 val = ath9k_hw_mac_to_clks(ah, us);
+ val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_CTS));
+ REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_CTS, val);
}
static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
@@ -1247,31 +1195,48 @@ static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
}
}
-static void ath9k_hw_init_user_settings(struct ath_hw *ah)
+void ath9k_hw_init_global_settings(struct ath_hw *ah)
{
+ struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
+ int acktimeout;
+ int slottime;
+ int sifstime;
+
ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
ah->misc_mode);
if (ah->misc_mode != 0)
REG_WRITE(ah, AR_PCU_MISC,
REG_READ(ah, AR_PCU_MISC) | ah->misc_mode);
- if (ah->slottime != (u32) -1)
- ath9k_hw_setslottime(ah, ah->slottime);
- if (ah->acktimeout != (u32) -1)
- ath9k_hw_set_ack_timeout(ah, ah->acktimeout);
- if (ah->ctstimeout != (u32) -1)
- ath9k_hw_set_cts_timeout(ah, ah->ctstimeout);
+
+ if (conf->channel && conf->channel->band == IEEE80211_BAND_5GHZ)
+ sifstime = 16;
+ else
+ sifstime = 10;
+
+ /* As defined by IEEE 802.11-2007 17.3.8.6 */
+ slottime = ah->slottime + 3 * ah->coverage_class;
+ acktimeout = slottime + sifstime;
+
+ /*
+ * Workaround for early ACK timeouts, add an offset to match the
+ * initval's 64us ack timeout value.
+ * This was initially only meant to work around an issue with delayed
+ * BA frames in some implementations, but it has been found to fix ACK
+ * timeout issues in other cases as well.
+ */
+ if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ)
+ acktimeout += 64 - sifstime - ah->slottime;
+
+ ath9k_hw_setslottime(ah, slottime);
+ ath9k_hw_set_ack_timeout(ah, acktimeout);
+ ath9k_hw_set_cts_timeout(ah, acktimeout);
if (ah->globaltxtimeout != (u32) -1)
ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout);
}
+EXPORT_SYMBOL(ath9k_hw_init_global_settings);
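A worked example of the new timeout derivation (illustration only): on 2.4 GHz with a 9 us slot time (ATH9K_SLOT_TIME_9, as set up in init.c below) and coverage_class = 0, sifstime = 10, slottime = 9 + 3 * 0 = 9 and acktimeout = 9 + 10 = 19 us; the 2 GHz workaround then adds 64 - 10 - 9 = 45 us, giving the 64 us value the comment refers to. With coverage_class = 1, slottime becomes 12 us and the ACK/CTS timeout 67 us.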
-const char *ath9k_hw_probe(u16 vendorid, u16 devid)
-{
- return vendorid == ATHEROS_VENDOR_ID ?
- ath9k_hw_devname(devid) : NULL;
-}
-
-void ath9k_hw_detach(struct ath_hw *ah)
+void ath9k_hw_deinit(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -1289,7 +1254,7 @@ free_hw:
kfree(ah);
ah = NULL;
}
-EXPORT_SYMBOL(ath9k_hw_detach);
+EXPORT_SYMBOL(ath9k_hw_deinit);
/*******/
/* INI */
@@ -2090,7 +2055,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
ath9k_enable_rfkill(ah);
- ath9k_hw_init_user_settings(ah);
+ ath9k_hw_init_global_settings(ah);
if (AR_SREV_9287_12_OR_LATER(ah)) {
REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
@@ -2120,7 +2085,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REG_WRITE(ah, AR_OBS, 8);
- if (ah->config.intr_mitigation) {
+ if (ah->config.rx_intr_mitigation) {
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
}
@@ -2780,7 +2745,7 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
*masked = isr & ATH9K_INT_COMMON;
- if (ah->config.intr_mitigation) {
+ if (ah->config.rx_intr_mitigation) {
if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
*masked |= ATH9K_INT_RX;
}
@@ -2913,7 +2878,7 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
}
if (ints & ATH9K_INT_RX) {
mask |= AR_IMR_RXERR;
- if (ah->config.intr_mitigation)
+ if (ah->config.rx_intr_mitigation)
mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
else
mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
@@ -3687,21 +3652,6 @@ u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp)
}
EXPORT_SYMBOL(ath9k_hw_extend_tsf);
-bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
-{
- if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
- "bad slot time %u\n", us);
- ah->slottime = (u32) -1;
- return false;
- } else {
- REG_WRITE(ah, AR_D_GBL_IFS_SLOT, ath9k_hw_mac_to_clks(ah, us));
- ah->slottime = us;
- return true;
- }
-}
-EXPORT_SYMBOL(ath9k_hw_setslottime);
-
void ath9k_hw_set11nmac2040(struct ath_hw *ah)
{
struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index e2b0c73a616f..dbbf7ca5f97d 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -40,6 +40,7 @@
#define AR9280_DEVID_PCI 0x0029
#define AR9280_DEVID_PCIE 0x002a
#define AR9285_DEVID_PCIE 0x002b
+#define AR2427_DEVID_PCIE 0x002c
#define AR5416_AR9100_DEVID 0x000b
@@ -212,7 +213,7 @@ struct ath9k_ops_config {
u32 cck_trig_low;
u32 enable_ani;
int serialize_regmode;
- bool intr_mitigation;
+ bool rx_intr_mitigation;
#define SPUR_DISABLE 0
#define SPUR_ENABLE_IOCTL 1
#define SPUR_ENABLE_EEPROM 2
@@ -551,10 +552,9 @@ struct ath_hw {
u32 *bank6Temp;
int16_t txpower_indexoffset;
+ int coverage_class;
u32 beacon_interval;
u32 slottime;
- u32 acktimeout;
- u32 ctstimeout;
u32 globaltxtimeout;
/* ANI */
@@ -616,7 +616,7 @@ static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
/* Initialization, Detach, Reset */
const char *ath9k_hw_probe(u16 vendorid, u16 devid);
-void ath9k_hw_detach(struct ath_hw *ah);
+void ath9k_hw_deinit(struct ath_hw *ah);
int ath9k_hw_init(struct ath_hw *ah);
int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
bool bChannelChange);
@@ -668,7 +668,7 @@ void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
void ath9k_hw_reset_tsf(struct ath_hw *ah);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp);
-bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us);
+void ath9k_hw_init_global_settings(struct ath_hw *ah);
void ath9k_hw_set11nmac2040(struct ath_hw *ah);
void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
new file mode 100644
index 000000000000..623c2f884987
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -0,0 +1,863 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+static char *dev_info = "ath9k";
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
+MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
+module_param_named(debug, ath9k_debug, uint, 0);
+MODULE_PARM_DESC(debug, "Debugging mask");
+
+int modparam_nohwcrypt;
+module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
+
+/* We use the hw_value as an index into our private channel structure */
+
+#define CHAN2G(_freq, _idx) { \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
+#define CHAN5G(_freq, _idx) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
+/* Some 2 GHz radios are actually tunable on 2312-2732 MHz
+ * in 5 MHz steps; we support only the channels for which we
+ * know we have calibration data on all cards, so that this
+ * table can stay static */
+static struct ieee80211_channel ath9k_2ghz_chantable[] = {
+ CHAN2G(2412, 0), /* Channel 1 */
+ CHAN2G(2417, 1), /* Channel 2 */
+ CHAN2G(2422, 2), /* Channel 3 */
+ CHAN2G(2427, 3), /* Channel 4 */
+ CHAN2G(2432, 4), /* Channel 5 */
+ CHAN2G(2437, 5), /* Channel 6 */
+ CHAN2G(2442, 6), /* Channel 7 */
+ CHAN2G(2447, 7), /* Channel 8 */
+ CHAN2G(2452, 8), /* Channel 9 */
+ CHAN2G(2457, 9), /* Channel 10 */
+ CHAN2G(2462, 10), /* Channel 11 */
+ CHAN2G(2467, 11), /* Channel 12 */
+ CHAN2G(2472, 12), /* Channel 13 */
+ CHAN2G(2484, 13), /* Channel 14 */
+};
+
+/* Some 5 GHz radios are actually tunable on XXXX-YYYY
+ * in 5 MHz steps; we support only the channels for which we
+ * know we have calibration data on all cards, so that this
+ * table can stay static */
+static struct ieee80211_channel ath9k_5ghz_chantable[] = {
+ /* _We_ call this UNII 1 */
+ CHAN5G(5180, 14), /* Channel 36 */
+ CHAN5G(5200, 15), /* Channel 40 */
+ CHAN5G(5220, 16), /* Channel 44 */
+ CHAN5G(5240, 17), /* Channel 48 */
+ /* _We_ call this UNII 2 */
+ CHAN5G(5260, 18), /* Channel 52 */
+ CHAN5G(5280, 19), /* Channel 56 */
+ CHAN5G(5300, 20), /* Channel 60 */
+ CHAN5G(5320, 21), /* Channel 64 */
+ /* _We_ call this "Middle band" */
+ CHAN5G(5500, 22), /* Channel 100 */
+ CHAN5G(5520, 23), /* Channel 104 */
+ CHAN5G(5540, 24), /* Channel 108 */
+ CHAN5G(5560, 25), /* Channel 112 */
+ CHAN5G(5580, 26), /* Channel 116 */
+ CHAN5G(5600, 27), /* Channel 120 */
+ CHAN5G(5620, 28), /* Channel 124 */
+ CHAN5G(5640, 29), /* Channel 128 */
+ CHAN5G(5660, 30), /* Channel 132 */
+ CHAN5G(5680, 31), /* Channel 136 */
+ CHAN5G(5700, 32), /* Channel 140 */
+ /* _We_ call this UNII 3 */
+ CHAN5G(5745, 33), /* Channel 149 */
+ CHAN5G(5765, 34), /* Channel 153 */
+ CHAN5G(5785, 35), /* Channel 157 */
+ CHAN5G(5805, 36), /* Channel 161 */
+ CHAN5G(5825, 37), /* Channel 165 */
+};
+
+/* Atheros hardware rate code addition for short preamble */
+#define SHPCHECK(__hw_rate, __flags) \
+ ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
+
+#define RATE(_bitrate, _hw_rate, _flags) { \
+ .bitrate = (_bitrate), \
+ .flags = (_flags), \
+ .hw_value = (_hw_rate), \
+ .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
+}
+
+static struct ieee80211_rate ath9k_legacy_rates[] = {
+ RATE(10, 0x1b, 0),
+ RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(60, 0x0b, 0),
+ RATE(90, 0x0f, 0),
+ RATE(120, 0x0a, 0),
+ RATE(180, 0x0e, 0),
+ RATE(240, 0x09, 0),
+ RATE(360, 0x0d, 0),
+ RATE(480, 0x08, 0),
+ RATE(540, 0x0c, 0),
+};
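The hw_value_short field above is filled in only for rates flagged IEEE80211_RATE_SHORT_PREAMBLE: SHPCHECK() ORs 0x04 into the hardware rate code. For example (illustration only), the 2 Mbit/s entry RATE(20, 0x1a, ...) gets hw_value_short = 0x1a | 0x04 = 0x1e, while the 1 Mbit/s and OFDM entries keep hw_value_short = 0.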
+
+static void ath9k_deinit_softc(struct ath_softc *sc);
+
+/*
+ * Read and write both share the same lock. We do this to serialize
+ * reads and writes on Atheros 802.11n PCI devices only. This is required
+ * as the FIFO on these devices can sanely accept only 2 requests.
+ */
+
+static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+
+ if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
+ unsigned long flags;
+ spin_lock_irqsave(&sc->sc_serial_rw, flags);
+ iowrite32(val, sc->mem + reg_offset);
+ spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
+ } else
+ iowrite32(val, sc->mem + reg_offset);
+}
+
+static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ u32 val;
+
+ if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
+ unsigned long flags;
+ spin_lock_irqsave(&sc->sc_serial_rw, flags);
+ val = ioread32(sc->mem + reg_offset);
+ spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
+ } else
+ val = ioread32(sc->mem + reg_offset);
+ return val;
+}
+
+static const struct ath_ops ath9k_common_ops = {
+ .read = ath9k_ioread32,
+ .write = ath9k_iowrite32,
+};
+
+/**************************/
+/* Initialization */
+/**************************/
+
+static void setup_ht_cap(struct ath_softc *sc,
+ struct ieee80211_sta_ht_cap *ht_info)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ u8 tx_streams, rx_streams;
+
+ ht_info->ht_supported = true;
+ ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SM_PS |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_DSSSCCK40;
+
+ ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+
+ /* set up supported mcs set */
+ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+ tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
+ 1 : 2;
+ rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
+ 1 : 2;
+
+ if (tx_streams != rx_streams) {
+ ath_print(common, ATH_DBG_CONFIG,
+ "TX streams %d, RX streams: %d\n",
+ tx_streams, rx_streams);
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ ht_info->mcs.tx_params |= ((tx_streams - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ }
+
+ ht_info->mcs.rx_mask[0] = 0xff;
+ if (rx_streams >= 2)
+ ht_info->mcs.rx_mask[1] = 0xff;
+
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+}
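The stream counts above rely on the usual single-bit test: !(mask & (mask - 1)) is true when at most one bit is set in the chainmask. For example (illustration only), a chainmask of 0x1 gives 0x1 & 0x0 = 0, hence one stream, while 0x3 gives 0x3 & 0x2 = 0x2, hence two streams; any configuration with more than one chain is simply treated as two streams here.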
+
+static int ath9k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+ struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
+
+ return ath_reg_notifier_apply(wiphy, request, reg);
+}
+
+/*
+ * This function allocates both the DMA descriptor structure and the
+ * buffers it contains. These are used to hold the descriptors used
+ * by the system.
+ */
+int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
+ struct list_head *head, const char *name,
+ int nbuf, int ndesc)
+{
+#define DS2PHYS(_dd, _ds) \
+ ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
+#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
+#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_desc *ds;
+ struct ath_buf *bf;
+ int i, bsize, error;
+
+ ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
+ name, nbuf, ndesc);
+
+ INIT_LIST_HEAD(head);
+ /* ath_desc must be a multiple of DWORDs */
+ if ((sizeof(struct ath_desc) % 4) != 0) {
+ ath_print(common, ATH_DBG_FATAL,
+ "ath_desc not DWORD aligned\n");
+ BUG_ON((sizeof(struct ath_desc) % 4) != 0);
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
+
+ /*
+ * Need additional DMA memory because we can't use
+ * descriptors that cross the 4K page boundary. Assume
+ * one skipped descriptor per 4K page.
+ */
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ u32 ndesc_skipped =
+ ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
+ u32 dma_len;
+
+ while (ndesc_skipped) {
+ dma_len = ndesc_skipped * sizeof(struct ath_desc);
+ dd->dd_desc_len += dma_len;
+
+ ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
+ };
+ }
+
+ /* allocate descriptors */
+ dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
+ &dd->dd_desc_paddr, GFP_KERNEL);
+ if (dd->dd_desc == NULL) {
+ error = -ENOMEM;
+ goto fail;
+ }
+ ds = dd->dd_desc;
+ ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
+ name, ds, (u32) dd->dd_desc_len,
+ ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
+
+ /* allocate buffers */
+ bsize = sizeof(struct ath_buf) * nbuf;
+ bf = kzalloc(bsize, GFP_KERNEL);
+ if (bf == NULL) {
+ error = -ENOMEM;
+ goto fail2;
+ }
+ dd->dd_bufptr = bf;
+
+ for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+
+ if (!(sc->sc_ah->caps.hw_caps &
+ ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ /*
+ * Skip descriptor addresses which can cause 4KB
+ * boundary crossing (addr + length) with a 32 dword
+ * descriptor fetch.
+ */
+ while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+ BUG_ON((caddr_t) bf->bf_desc >=
+ ((caddr_t) dd->dd_desc +
+ dd->dd_desc_len));
+
+ ds += ndesc;
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+ }
+ }
+ list_add_tail(&bf->list, head);
+ }
+ return 0;
+fail2:
+ dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
+ dd->dd_desc_paddr);
+fail:
+ memset(dd, 0, sizeof(*dd));
+ return error;
+#undef ATH_DESC_4KB_BOUND_CHECK
+#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
+#undef DS2PHYS
+}
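A concrete reading of the boundary check above (illustration only): ATH_DESC_4KB_BOUND_CHECK() flags any descriptor whose offset within its 4 KB page is above 0xF7F, i.e. where a 32-dword (128-byte) descriptor fetch starting there would run up to or past the page boundary. An offset of 0xF7F still fits (0xF7F + 0x80 = 0xFFF), whereas 0xF80 + 0x80 = 0x1000 hits the boundary, so the loop steps forward by one descriptor stride until the address is safe again.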
+
+static void ath9k_init_crypto(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int i = 0;
+
+ /* Get the hardware key cache size. */
+ common->keymax = sc->sc_ah->caps.keycache_size;
+ if (common->keymax > ATH_KEYMAX) {
+ ath_print(common, ATH_DBG_ANY,
+ "Warning, using only %u entries in %u key cache\n",
+ ATH_KEYMAX, common->keymax);
+ common->keymax = ATH_KEYMAX;
+ }
+
+ /*
+ * Reset the key cache since some parts do not
+ * reset the contents on initial power up.
+ */
+ for (i = 0; i < common->keymax; i++)
+ ath9k_hw_keyreset(sc->sc_ah, (u16) i);
+
+ if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_TKIP, NULL)) {
+ /*
+ * Whether we should enable h/w TKIP MIC.
+ * XXX: if we don't support WME TKIP MIC, then we wouldn't
+ * report WMM capable, so it's always safe to turn on
+ * TKIP MIC in this case.
+ */
+ ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
+ }
+
+ /*
+ * Check whether the separate key cache entries
+ * are required to handle both tx+rx MIC keys.
+ * With split mic keys the number of stations is limited
+ * to 27 otherwise 59.
+ */
+ if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_TKIP, NULL)
+ && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_MIC, NULL)
+ && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT,
+ 0, NULL))
+ common->splitmic = 1;
+
+ /* turn on mcast key search if possible */
+ if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
+ (void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH,
+ 1, 1, NULL);
+
+}
+
+static int ath9k_init_btcoex(struct ath_softc *sc)
+{
+ int r, qnum;
+
+ switch (sc->sc_ah->btcoex_hw.scheme) {
+ case ATH_BTCOEX_CFG_NONE:
+ break;
+ case ATH_BTCOEX_CFG_2WIRE:
+ ath9k_hw_btcoex_init_2wire(sc->sc_ah);
+ break;
+ case ATH_BTCOEX_CFG_3WIRE:
+ ath9k_hw_btcoex_init_3wire(sc->sc_ah);
+ r = ath_init_btcoex_timer(sc);
+ if (r)
+ return -1;
+ qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
+ ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
+ sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return 0;
+}
+
+static int ath9k_init_queues(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int i = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
+ sc->tx.hwq_map[i] = -1;
+
+ sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
+ if (sc->beacon.beaconq == -1) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup a beacon xmit queue\n");
+ goto err;
+ }
+
+ sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
+ if (sc->beacon.cabq == NULL) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup CAB xmit queue\n");
+ goto err;
+ }
+
+ sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
+ ath_cabq_update(sc);
+
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for BK traffic\n");
+ goto err;
+ }
+
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for BE traffic\n");
+ goto err;
+ }
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for VI traffic\n");
+ goto err;
+ }
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for VO traffic\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->tx.txq[i]);
+
+ return -EIO;
+}
+
+static void ath9k_init_channels_rates(struct ath_softc *sc)
+{
+ if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
+ sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
+ sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
+ sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
+ ARRAY_SIZE(ath9k_2ghz_chantable);
+ sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
+ sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates);
+ }
+
+ if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
+ sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
+ sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
+ sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
+ ARRAY_SIZE(ath9k_5ghz_chantable);
+ sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
+ ath9k_legacy_rates + 4;
+ sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates) - 4;
+ }
+}
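
The 5 GHz band above points four entries into the legacy rate table because the first four entries are the CCK rates, which exist only in 2 GHz. A compile-alone sketch of that pointer arithmetic, using the 100 kbit/s rate values from the table this patch moves out of main.c.

/* Illustrative only -- not part of the patch. */
#include <stdio.h>

/* bitrates in 100 kbit/s, mirroring ath9k_legacy_rates */
static const int legacy_rates[] = { 10, 20, 55, 110,	/* CCK, 2 GHz only */
				    60, 90, 120, 180, 240, 360, 480, 540 };
#define N_RATES (int)(sizeof(legacy_rates) / sizeof(legacy_rates[0]))

int main(void)
{
	const int *rates_5ghz = legacy_rates + 4;	/* skip the CCK entries */
	int n_5ghz = N_RATES - 4;

	printf("2 GHz: %d rates, 5 GHz: %d rates starting at %d Mbit/s\n",
	       N_RATES, n_5ghz, rates_5ghz[0] / 10);
	return 0;
}
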
+
+static void ath9k_init_misc(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int i = 0;
+
+ common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
+ setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
+
+ sc->config.txpowlimit = ATH_TXPOWER_MAX;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ sc->sc_flags |= SC_OP_TXAGGR;
+ sc->sc_flags |= SC_OP_RXAGGR;
+ }
+
+ common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
+ common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
+
+ ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
+ sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
+ memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
+
+ sc->beacon.slottime = ATH9K_SLOT_TIME_9;
+
+ for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
+ sc->beacon.bslot[i] = NULL;
+ sc->beacon.bslot_aphy[i] = NULL;
+ }
+}
+
+static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
+ const struct ath_bus_ops *bus_ops)
+{
+ struct ath_hw *ah = NULL;
+ struct ath_common *common;
+ int ret = 0, i;
+ int csz = 0;
+
+ ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
+ if (!ah)
+ return -ENOMEM;
+
+ ah->hw_version.devid = devid;
+ ah->hw_version.subsysid = subsysid;
+ sc->sc_ah = ah;
+
+ common = ath9k_hw_common(ah);
+ common->ops = &ath9k_common_ops;
+ common->bus_ops = bus_ops;
+ common->ah = ah;
+ common->hw = sc->hw;
+ common->priv = sc;
+ common->debug_mask = ath9k_debug;
+
+ spin_lock_init(&sc->wiphy_lock);
+ spin_lock_init(&sc->sc_resetlock);
+ spin_lock_init(&sc->sc_serial_rw);
+ spin_lock_init(&sc->sc_pm_lock);
+ mutex_init(&sc->mutex);
+ tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
+ tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
+ (unsigned long)sc);
+
+ /*
+ * Cache line size is used to size and align various
+ * structures used to communicate with the hardware.
+ */
+ ath_read_cachesize(common, &csz);
+ common->cachelsz = csz << 2; /* convert to bytes */
+
+ ret = ath9k_hw_init(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to initialize hardware; "
+ "initialization status: %d\n", ret);
+ goto err_hw;
+ }
+
+ ret = ath9k_init_debug(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to create debugfs files\n");
+ goto err_debug;
+ }
+
+ ret = ath9k_init_queues(sc);
+ if (ret)
+ goto err_queues;
+
+ ret = ath9k_init_btcoex(sc);
+ if (ret)
+ goto err_btcoex;
+
+ ath9k_init_crypto(sc);
+ ath9k_init_channels_rates(sc);
+ ath9k_init_misc(sc);
+
+ return 0;
+
+err_btcoex:
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->tx.txq[i]);
+err_queues:
+ ath9k_exit_debug(ah);
+err_debug:
+ ath9k_hw_deinit(ah);
+err_hw:
+ tasklet_kill(&sc->intr_tq);
+ tasklet_kill(&sc->bcon_tasklet);
+
+ kfree(ah);
+ sc->sc_ah = NULL;
+
+ return ret;
+}
+
+void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_PS_NULLFUNC_STACK |
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
+ hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+
+ if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
+ hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->queues = 4;
+ hw->max_rates = 4;
+ hw->channel_change_time = 5000;
+ hw->max_listen_interval = 10;
+ hw->max_rate_tries = 10;
+ hw->sta_data_size = sizeof(struct ath_node);
+ hw->vif_data_size = sizeof(struct ath_vif);
+
+ hw->rate_control_algorithm = "ath9k_rate_control";
+
+ if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &sc->sbands[IEEE80211_BAND_2GHZ];
+ if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &sc->sbands[IEEE80211_BAND_5GHZ];
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
+ setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+ if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
+ setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
+ }
+
+ SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
+}
+
+int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
+ const struct ath_bus_ops *bus_ops)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_common *common;
+ struct ath_hw *ah;
+ int error = 0;
+ struct ath_regulatory *reg;
+
+ /* Bring up device */
+ error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
+ if (error != 0)
+ goto error_init;
+
+ ah = sc->sc_ah;
+ common = ath9k_hw_common(ah);
+ ath9k_set_hw_capab(sc, hw);
+
+ /* Initialize regulatory */
+ error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
+ ath9k_reg_notifier);
+ if (error)
+ goto error_regd;
+
+ reg = &common->regulatory;
+
+ /* Setup TX DMA */
+ error = ath_tx_init(sc, ATH_TXBUF);
+ if (error != 0)
+ goto error_tx;
+
+ /* Setup RX DMA */
+ error = ath_rx_init(sc, ATH_RXBUF);
+ if (error != 0)
+ goto error_rx;
+
+ /* Register with mac80211 */
+ error = ieee80211_register_hw(hw);
+ if (error)
+ goto error_register;
+
+ /* Handle world regulatory */
+ if (!ath_is_world_regd(reg)) {
+ error = regulatory_hint(hw->wiphy, reg->alpha2);
+ if (error)
+ goto error_world;
+ }
+
+ INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
+ INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
+ sc->wiphy_scheduler_int = msecs_to_jiffies(500);
+
+ ath_init_leds(sc);
+ ath_start_rfkill_poll(sc);
+
+ return 0;
+
+error_world:
+ ieee80211_unregister_hw(hw);
+error_register:
+ ath_rx_cleanup(sc);
+error_rx:
+ ath_tx_cleanup(sc);
+error_tx:
+ /* Nothing */
+error_regd:
+ ath9k_deinit_softc(sc);
+error_init:
+ return error;
+}
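
A hypothetical bus-glue fragment showing how a probe path would be expected to consume ath9k_init_device(); only the function's signature comes from this file, the surrounding probe plumbing and names are assumptions, and the fragment is not meant to compile on its own.

/* Hypothetical fragment -- not part of the patch, not compile-alone. */
static int example_probe_glue(struct ath_softc *sc, u16 devid, u16 subsysid,
			      const struct ath_bus_ops *bus_ops)
{
	int ret;

	ret = ath9k_init_device(devid, sc, subsysid, bus_ops);
	if (ret) {
		dev_err(sc->dev, "ath9k: device initialization failed: %d\n", ret);
		return ret;
	}
	/* On success the wiphy is registered and LEDs/rfkill are running. */
	return 0;
}
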
+
+/*****************************/
+/* De-Initialization */
+/*****************************/
+
+static void ath9k_deinit_softc(struct ath_softc *sc)
+{
+ int i = 0;
+
+ if ((sc->btcoex.no_stomp_timer) &&
+ sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+ ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
+
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->tx.txq[i]);
+
+ ath9k_exit_debug(sc->sc_ah);
+ ath9k_hw_deinit(sc->sc_ah);
+
+ tasklet_kill(&sc->intr_tq);
+ tasklet_kill(&sc->bcon_tasklet);
+}
+
+void ath9k_deinit_device(struct ath_softc *sc)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ int i = 0;
+
+ ath9k_ps_wakeup(sc);
+
+ wiphy_rfkill_stop_polling(sc->hw->wiphy);
+ ath_deinit_leds(sc);
+
+ for (i = 0; i < sc->num_sec_wiphy; i++) {
+ struct ath_wiphy *aphy = sc->sec_wiphy[i];
+ if (aphy == NULL)
+ continue;
+ sc->sec_wiphy[i] = NULL;
+ ieee80211_unregister_hw(aphy->hw);
+ ieee80211_free_hw(aphy->hw);
+ }
+ kfree(sc->sec_wiphy);
+
+ ieee80211_unregister_hw(hw);
+ ath_rx_cleanup(sc);
+ ath_tx_cleanup(sc);
+ ath9k_deinit_softc(sc);
+}
+
+void ath_descdma_cleanup(struct ath_softc *sc,
+ struct ath_descdma *dd,
+ struct list_head *head)
+{
+ dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
+ dd->dd_desc_paddr);
+
+ INIT_LIST_HEAD(head);
+ kfree(dd->dd_bufptr);
+ memset(dd, 0, sizeof(*dd));
+}
+
+/************************/
+/* Module Hooks */
+/************************/
+
+static int __init ath9k_init(void)
+{
+ int error;
+
+ /* Register rate control algorithm */
+ error = ath_rate_control_register();
+ if (error != 0) {
+ printk(KERN_ERR
+ "ath9k: Unable to register rate control "
+ "algorithm: %d\n",
+ error);
+ goto err_out;
+ }
+
+ error = ath9k_debug_create_root();
+ if (error) {
+ printk(KERN_ERR
+ "ath9k: Unable to create debugfs root: %d\n",
+ error);
+ goto err_rate_unregister;
+ }
+
+ error = ath_pci_init();
+ if (error < 0) {
+ printk(KERN_ERR
+ "ath9k: No PCI devices found, driver not installed.\n");
+ error = -ENODEV;
+ goto err_remove_root;
+ }
+
+ error = ath_ahb_init();
+ if (error < 0) {
+ error = -ENODEV;
+ goto err_pci_exit;
+ }
+
+ return 0;
+
+ err_pci_exit:
+ ath_pci_exit();
+
+ err_remove_root:
+ ath9k_debug_remove_root();
+ err_rate_unregister:
+ ath_rate_control_unregister();
+ err_out:
+ return error;
+}
+module_init(ath9k_init);
+
+static void __exit ath9k_exit(void)
+{
+ ath_ahb_exit();
+ ath_pci_exit();
+ ath9k_debug_remove_root();
+ ath_rate_control_unregister();
+ printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
+}
+module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index e185479e295e..29851e6376a9 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -167,6 +167,40 @@ struct ath_rx_status {
#define ATH9K_RXKEYIX_INVALID ((u8)-1)
#define ATH9K_TXKEYIX_INVALID ((u32)-1)
+enum ath9k_phyerr {
+ ATH9K_PHYERR_UNDERRUN = 0, /* Transmit underrun */
+ ATH9K_PHYERR_TIMING = 1, /* Timing error */
+ ATH9K_PHYERR_PARITY = 2, /* Illegal parity */
+ ATH9K_PHYERR_RATE = 3, /* Illegal rate */
+ ATH9K_PHYERR_LENGTH = 4, /* Illegal length */
+ ATH9K_PHYERR_RADAR = 5, /* Radar detect */
+ ATH9K_PHYERR_SERVICE = 6, /* Illegal service */
+ ATH9K_PHYERR_TOR = 7, /* Transmit override receive */
+
+ ATH9K_PHYERR_OFDM_TIMING = 17,
+ ATH9K_PHYERR_OFDM_SIGNAL_PARITY = 18,
+ ATH9K_PHYERR_OFDM_RATE_ILLEGAL = 19,
+ ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL = 20,
+ ATH9K_PHYERR_OFDM_POWER_DROP = 21,
+ ATH9K_PHYERR_OFDM_SERVICE = 22,
+ ATH9K_PHYERR_OFDM_RESTART = 23,
+ ATH9K_PHYERR_FALSE_RADAR_EXT = 24,
+
+ ATH9K_PHYERR_CCK_TIMING = 25,
+ ATH9K_PHYERR_CCK_HEADER_CRC = 26,
+ ATH9K_PHYERR_CCK_RATE_ILLEGAL = 27,
+ ATH9K_PHYERR_CCK_SERVICE = 30,
+ ATH9K_PHYERR_CCK_RESTART = 31,
+ ATH9K_PHYERR_CCK_LENGTH_ILLEGAL = 32,
+ ATH9K_PHYERR_CCK_POWER_DROP = 33,
+
+ ATH9K_PHYERR_HT_CRC_ERROR = 34,
+ ATH9K_PHYERR_HT_LENGTH_ILLEGAL = 35,
+ ATH9K_PHYERR_HT_RATE_ILLEGAL = 36,
+
+ ATH9K_PHYERR_MAX = 37,
+};
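
A minimal sketch of how ATH9K_PHYERR_MAX could bound a per-code error counter; the array and helper below are assumptions made for illustration, only the enum itself comes from this header.

/* Illustrative only -- not part of the patch. */
static unsigned long example_phyerr_count[ATH9K_PHYERR_MAX];

static void example_count_phyerr(unsigned int code)
{
	/* codes 8-16 and 28-29 are gaps in the enum above */
	if (code < ATH9K_PHYERR_MAX)
		example_phyerr_count[code]++;
}
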
+
struct ath_desc {
u32 ds_link;
u32 ds_data;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 643bea35686f..9c8f925c2093 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -18,118 +18,6 @@
#include "ath9k.h"
#include "btcoex.h"
-static char *dev_info = "ath9k";
-
-MODULE_AUTHOR("Atheros Communications");
-MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
-MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
-MODULE_LICENSE("Dual BSD/GPL");
-
-static int modparam_nohwcrypt;
-module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
-MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
-
-static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
-module_param_named(debug, ath9k_debug, uint, 0);
-MODULE_PARM_DESC(debug, "Debugging mask");
-
-/* We use the hw_value as an index into our private channel structure */
-
-#define CHAN2G(_freq, _idx) { \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 20, \
-}
-
-#define CHAN5G(_freq, _idx) { \
- .band = IEEE80211_BAND_5GHZ, \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 20, \
-}
-
-/* Some 2 GHz radios are actually tunable on 2312-2732
- * on 5 MHz steps, we support the channels which we know
- * we have calibration data for all cards though to make
- * this static */
-static struct ieee80211_channel ath9k_2ghz_chantable[] = {
- CHAN2G(2412, 0), /* Channel 1 */
- CHAN2G(2417, 1), /* Channel 2 */
- CHAN2G(2422, 2), /* Channel 3 */
- CHAN2G(2427, 3), /* Channel 4 */
- CHAN2G(2432, 4), /* Channel 5 */
- CHAN2G(2437, 5), /* Channel 6 */
- CHAN2G(2442, 6), /* Channel 7 */
- CHAN2G(2447, 7), /* Channel 8 */
- CHAN2G(2452, 8), /* Channel 9 */
- CHAN2G(2457, 9), /* Channel 10 */
- CHAN2G(2462, 10), /* Channel 11 */
- CHAN2G(2467, 11), /* Channel 12 */
- CHAN2G(2472, 12), /* Channel 13 */
- CHAN2G(2484, 13), /* Channel 14 */
-};
-
-/* Some 5 GHz radios are actually tunable on XXXX-YYYY
- * on 5 MHz steps, we support the channels which we know
- * we have calibration data for all cards though to make
- * this static */
-static struct ieee80211_channel ath9k_5ghz_chantable[] = {
- /* _We_ call this UNII 1 */
- CHAN5G(5180, 14), /* Channel 36 */
- CHAN5G(5200, 15), /* Channel 40 */
- CHAN5G(5220, 16), /* Channel 44 */
- CHAN5G(5240, 17), /* Channel 48 */
- /* _We_ call this UNII 2 */
- CHAN5G(5260, 18), /* Channel 52 */
- CHAN5G(5280, 19), /* Channel 56 */
- CHAN5G(5300, 20), /* Channel 60 */
- CHAN5G(5320, 21), /* Channel 64 */
- /* _We_ call this "Middle band" */
- CHAN5G(5500, 22), /* Channel 100 */
- CHAN5G(5520, 23), /* Channel 104 */
- CHAN5G(5540, 24), /* Channel 108 */
- CHAN5G(5560, 25), /* Channel 112 */
- CHAN5G(5580, 26), /* Channel 116 */
- CHAN5G(5600, 27), /* Channel 120 */
- CHAN5G(5620, 28), /* Channel 124 */
- CHAN5G(5640, 29), /* Channel 128 */
- CHAN5G(5660, 30), /* Channel 132 */
- CHAN5G(5680, 31), /* Channel 136 */
- CHAN5G(5700, 32), /* Channel 140 */
- /* _We_ call this UNII 3 */
- CHAN5G(5745, 33), /* Channel 149 */
- CHAN5G(5765, 34), /* Channel 153 */
- CHAN5G(5785, 35), /* Channel 157 */
- CHAN5G(5805, 36), /* Channel 161 */
- CHAN5G(5825, 37), /* Channel 165 */
-};
-
-/* Atheros hardware rate code addition for short premble */
-#define SHPCHECK(__hw_rate, __flags) \
- ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
-
-#define RATE(_bitrate, _hw_rate, _flags) { \
- .bitrate = (_bitrate), \
- .flags = (_flags), \
- .hw_value = (_hw_rate), \
- .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
-}
-
-static struct ieee80211_rate ath9k_legacy_rates[] = {
- RATE(10, 0x1b, 0),
- RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(60, 0x0b, 0),
- RATE(90, 0x0f, 0),
- RATE(120, 0x0a, 0),
- RATE(180, 0x0e, 0),
- RATE(240, 0x09, 0),
- RATE(360, 0x0d, 0),
- RATE(480, 0x08, 0),
- RATE(540, 0x0c, 0),
-};
-
static void ath_cache_conf_rate(struct ath_softc *sc,
struct ieee80211_conf *conf)
{
@@ -221,7 +109,7 @@ static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
return channel;
}
-static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
+bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
{
unsigned long flags;
bool ret;
@@ -255,11 +143,13 @@ void ath9k_ps_restore(struct ath_softc *sc)
if (--sc->ps_usecount != 0)
goto unlock;
- if (sc->ps_enabled &&
- !(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA |
- SC_OP_WAIT_FOR_TX_ACK)))
+ if (sc->ps_idle)
+ ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
+ else if (sc->ps_enabled &&
+ !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK)))
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
unlock:
@@ -316,7 +206,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
r = ath9k_hw_reset(ah, hchan, fastcc);
if (r) {
ath_print(common, ATH_DBG_FATAL,
- "Unable to reset channel (%u Mhz) "
+ "Unable to reset channel (%u MHz), "
"reset status %d\n",
channel->center_freq, r);
spin_unlock_bh(&sc->sc_resetlock);
@@ -349,7 +239,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
* When the task is complete, it reschedules itself depending on the
* appropriate interval that was calculated.
*/
-static void ath_ani_calibrate(unsigned long data)
+void ath_ani_calibrate(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
struct ath_hw *ah = sc->sc_ah;
@@ -363,14 +253,6 @@ static void ath_ani_calibrate(unsigned long data)
short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
- /*
- * don't calibrate when we're scanning.
- * we are most likely not on our home channel.
- */
- spin_lock(&sc->ani_lock);
- if (sc->sc_flags & SC_OP_SCANNING)
- goto set_timer;
-
/* Only calibrate if awake */
if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
goto set_timer;
@@ -437,7 +319,6 @@ static void ath_ani_calibrate(unsigned long data)
ath9k_ps_restore(sc);
set_timer:
- spin_unlock(&sc->ani_lock);
/*
* Set timer interval based on previous results.
* The interval must be the shortest necessary to satisfy ANI,
@@ -513,7 +394,7 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
ath_tx_node_cleanup(sc, an);
}
-static void ath9k_tasklet(unsigned long data)
+void ath9k_tasklet(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
struct ath_hw *ah = sc->sc_ah;
@@ -545,7 +426,7 @@ static void ath9k_tasklet(unsigned long data)
*/
ath_print(common, ATH_DBG_PS,
"TSFOOR - Sync with next Beacon\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_BEACON | SC_OP_BEACON_SYNC;
+ sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
}
if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
@@ -646,7 +527,7 @@ irqreturn_t ath_isr(int irq, void *dev)
* receive frames */
ath9k_setpower(sc, ATH9K_PM_AWAKE);
ath9k_hw_setrxabort(sc->sc_ah, 0);
- sc->sc_flags |= SC_OP_WAIT_FOR_BEACON;
+ sc->ps_flags |= PS_WAIT_FOR_BEACON;
}
chip_reset:
@@ -928,49 +809,12 @@ static void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf
clear_bit(key->hw_key_idx + 64, common->keymap);
if (common->splitmic) {
+ ath9k_hw_keyreset(ah, key->hw_key_idx + 32);
clear_bit(key->hw_key_idx + 32, common->keymap);
clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
}
}
-static void setup_ht_cap(struct ath_softc *sc,
- struct ieee80211_sta_ht_cap *ht_info)
-{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- u8 tx_streams, rx_streams;
-
- ht_info->ht_supported = true;
- ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
- IEEE80211_HT_CAP_SM_PS |
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_DSSSCCK40;
-
- ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
-
- /* set up supported mcs set */
- memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
- tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
- 1 : 2;
- rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
- 1 : 2;
-
- if (tx_streams != rx_streams) {
- ath_print(common, ATH_DBG_CONFIG,
- "TX streams %d, RX streams: %d\n",
- tx_streams, rx_streams);
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
- ht_info->mcs.tx_params |= ((tx_streams - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
- }
-
- ht_info->mcs.rx_mask[0] = 0xff;
- if (rx_streams >= 2)
- ht_info->mcs.rx_mask[1] = 0xff;
-
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
-}
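
A compile-alone sketch of the chainmask test used in setup_ht_cap() (which this patch moves out of main.c): a mask with a single bit set means one chain and therefore one spatial stream, anything else is treated as two.

/* Illustrative only -- not part of the patch. */
#include <stdio.h>

static int example_streams(unsigned int chainmask)
{
	/* single bit set -> one chain -> one stream; otherwise two */
	return !(chainmask & (chainmask - 1)) ? 1 : 2;
}

int main(void)
{
	printf("0x1 -> %d, 0x3 -> %d, 0x7 -> %d\n",
	       example_streams(0x1), example_streams(0x3),
	       example_streams(0x7));	/* prints 1, 2, 2 */
	return 0;
}
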
-
static void ath9k_bss_assoc_info(struct ath_softc *sc,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf)
@@ -992,7 +836,7 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
* on the receipt of the first Beacon frame (i.e.,
* after time sync with the AP).
*/
- sc->sc_flags |= SC_OP_BEACON_SYNC;
+ sc->ps_flags |= PS_BEACON_SYNC;
/* Configure the beacon */
ath_beacon_config(sc, vif);
@@ -1009,174 +853,6 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
}
}
-/********************************/
-/* LED functions */
-/********************************/
-
-static void ath_led_blink_work(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc,
- ath_led_blink_work.work);
-
- if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
- return;
-
- if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
- (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
- else
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
- (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
-
- ieee80211_queue_delayed_work(sc->hw,
- &sc->ath_led_blink_work,
- (sc->sc_flags & SC_OP_LED_ON) ?
- msecs_to_jiffies(sc->led_off_duration) :
- msecs_to_jiffies(sc->led_on_duration));
-
- sc->led_on_duration = sc->led_on_cnt ?
- max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
- ATH_LED_ON_DURATION_IDLE;
- sc->led_off_duration = sc->led_off_cnt ?
- max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
- ATH_LED_OFF_DURATION_IDLE;
- sc->led_on_cnt = sc->led_off_cnt = 0;
- if (sc->sc_flags & SC_OP_LED_ON)
- sc->sc_flags &= ~SC_OP_LED_ON;
- else
- sc->sc_flags |= SC_OP_LED_ON;
-}
-
-static void ath_led_brightness(struct led_classdev *led_cdev,
- enum led_brightness brightness)
-{
- struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
- struct ath_softc *sc = led->sc;
-
- switch (brightness) {
- case LED_OFF:
- if (led->led_type == ATH_LED_ASSOC ||
- led->led_type == ATH_LED_RADIO) {
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
- (led->led_type == ATH_LED_RADIO));
- sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
- if (led->led_type == ATH_LED_RADIO)
- sc->sc_flags &= ~SC_OP_LED_ON;
- } else {
- sc->led_off_cnt++;
- }
- break;
- case LED_FULL:
- if (led->led_type == ATH_LED_ASSOC) {
- sc->sc_flags |= SC_OP_LED_ASSOCIATED;
- ieee80211_queue_delayed_work(sc->hw,
- &sc->ath_led_blink_work, 0);
- } else if (led->led_type == ATH_LED_RADIO) {
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
- sc->sc_flags |= SC_OP_LED_ON;
- } else {
- sc->led_on_cnt++;
- }
- break;
- default:
- break;
- }
-}
-
-static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
- char *trigger)
-{
- int ret;
-
- led->sc = sc;
- led->led_cdev.name = led->name;
- led->led_cdev.default_trigger = trigger;
- led->led_cdev.brightness_set = ath_led_brightness;
-
- ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
- if (ret)
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
- "Failed to register led:%s", led->name);
- else
- led->registered = 1;
- return ret;
-}
-
-static void ath_unregister_led(struct ath_led *led)
-{
- if (led->registered) {
- led_classdev_unregister(&led->led_cdev);
- led->registered = 0;
- }
-}
-
-static void ath_deinit_leds(struct ath_softc *sc)
-{
- ath_unregister_led(&sc->assoc_led);
- sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
- ath_unregister_led(&sc->tx_led);
- ath_unregister_led(&sc->rx_led);
- ath_unregister_led(&sc->radio_led);
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
-}
-
-static void ath_init_leds(struct ath_softc *sc)
-{
- char *trigger;
- int ret;
-
- if (AR_SREV_9287(sc->sc_ah))
- sc->sc_ah->led_pin = ATH_LED_PIN_9287;
- else
- sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
-
- /* Configure gpio 1 for output */
- ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- /* LED off, active low */
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
-
- INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
-
- trigger = ieee80211_get_radio_led_name(sc->hw);
- snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
- "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->radio_led, trigger);
- sc->radio_led.led_type = ATH_LED_RADIO;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_assoc_led_name(sc->hw);
- snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
- "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->assoc_led, trigger);
- sc->assoc_led.led_type = ATH_LED_ASSOC;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_tx_led_name(sc->hw);
- snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
- "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->tx_led, trigger);
- sc->tx_led.led_type = ATH_LED_TX;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_rx_led_name(sc->hw);
- snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
- "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->rx_led, trigger);
- sc->rx_led.led_type = ATH_LED_RX;
- if (ret)
- goto fail;
-
- return;
-
-fail:
- cancel_delayed_work_sync(&sc->ath_led_blink_work);
- ath_deinit_leds(sc);
-}
-
void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
{
struct ath_hw *ah = sc->sc_ah;
@@ -1194,7 +870,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
r = ath9k_hw_reset(ah, ah->curchan, false);
if (r) {
ath_print(common, ATH_DBG_FATAL,
- "Unable to reset channel %u (%uMhz) ",
+ "Unable to reset channel (%u MHz), "
"reset status %d\n",
channel->center_freq, r);
}
@@ -1249,7 +925,7 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
r = ath9k_hw_reset(ah, ah->curchan, false);
if (r) {
ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
- "Unable to reset channel %u (%uMhz) "
+ "Unable to reset channel (%u MHz), "
"reset status %d\n",
channel->center_freq, r);
}
@@ -1261,711 +937,6 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
}
-/*******************/
-/* Rfkill */
-/*******************/
-
-static bool ath_is_rfkill_set(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
- ah->rfkill_polarity;
-}
-
-static void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
-{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- bool blocked = !!ath_is_rfkill_set(sc);
-
- wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
-}
-
-static void ath_start_rfkill_poll(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
- wiphy_rfkill_start_polling(sc->hw->wiphy);
-}
-
-static void ath9k_uninit_hw(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- BUG_ON(!ah);
-
- ath9k_exit_debug(ah);
- ath9k_hw_detach(ah);
- sc->sc_ah = NULL;
-}
-
-static void ath_clean_core(struct ath_softc *sc)
-{
- struct ieee80211_hw *hw = sc->hw;
- struct ath_hw *ah = sc->sc_ah;
- int i = 0;
-
- ath9k_ps_wakeup(sc);
-
- dev_dbg(sc->dev, "Detach ATH hw\n");
-
- ath_deinit_leds(sc);
- wiphy_rfkill_stop_polling(sc->hw->wiphy);
-
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (aphy == NULL)
- continue;
- sc->sec_wiphy[i] = NULL;
- ieee80211_unregister_hw(aphy->hw);
- ieee80211_free_hw(aphy->hw);
- }
- ieee80211_unregister_hw(hw);
- ath_rx_cleanup(sc);
- ath_tx_cleanup(sc);
-
- tasklet_kill(&sc->intr_tq);
- tasklet_kill(&sc->bcon_tasklet);
-
- if (!(sc->sc_flags & SC_OP_INVALID))
- ath9k_setpower(sc, ATH9K_PM_AWAKE);
-
- /* cleanup tx queues */
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
- if (ATH_TXQ_SETUP(sc, i))
- ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-
- if ((sc->btcoex.no_stomp_timer) &&
- ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
- ath_gen_timer_free(ah, sc->btcoex.no_stomp_timer);
-}
-
-void ath_detach(struct ath_softc *sc)
-{
- ath_clean_core(sc);
- ath9k_uninit_hw(sc);
-}
-
-void ath_cleanup(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
-
- ath_clean_core(sc);
- free_irq(sc->irq, sc);
- ath_bus_cleanup(common);
- kfree(sc->sec_wiphy);
- ieee80211_free_hw(sc->hw);
-
- ath9k_uninit_hw(sc);
-}
-
-static int ath9k_reg_notifier(struct wiphy *wiphy,
- struct regulatory_request *request)
-{
- struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
-
- return ath_reg_notifier_apply(wiphy, request, reg);
-}
-
-/*
- * Detects if there is any priority bt traffic
- */
-static void ath_detect_bt_priority(struct ath_softc *sc)
-{
- struct ath_btcoex *btcoex = &sc->btcoex;
- struct ath_hw *ah = sc->sc_ah;
-
- if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
- btcoex->bt_priority_cnt++;
-
- if (time_after(jiffies, btcoex->bt_priority_time +
- msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
- if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
- "BT priority traffic detected");
- sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
- } else {
- sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
- }
-
- btcoex->bt_priority_cnt = 0;
- btcoex->bt_priority_time = jiffies;
- }
-}
-
-/*
- * Configures appropriate weight based on stomp type.
- */
-static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
- enum ath_stomp_type stomp_type)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- switch (stomp_type) {
- case ATH_BTCOEX_STOMP_ALL:
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_ALL_WLAN_WGHT);
- break;
- case ATH_BTCOEX_STOMP_LOW:
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_LOW_WLAN_WGHT);
- break;
- case ATH_BTCOEX_STOMP_NONE:
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_NONE_WLAN_WGHT);
- break;
- default:
- ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Invalid Stomptype\n");
- break;
- }
-
- ath9k_hw_btcoex_enable(ah);
-}
-
-static void ath9k_gen_timer_start(struct ath_hw *ah,
- struct ath_gen_timer *timer,
- u32 timer_next,
- u32 timer_period)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
-
- ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
-
- if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
- ath9k_hw_set_interrupts(ah, 0);
- sc->imask |= ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah, sc->imask);
- }
-}
-
-static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
-
- ath9k_hw_gen_timer_stop(ah, timer);
-
- /* if no timer is enabled, turn off interrupt mask */
- if (timer_table->timer_mask.val == 0) {
- ath9k_hw_set_interrupts(ah, 0);
- sc->imask &= ~ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah, sc->imask);
- }
-}
-
-/*
- * This is the master bt coex timer which runs for every
- * 45ms, bt traffic will be given priority during 55% of this
- * period while wlan gets remaining 45%
- */
-static void ath_btcoex_period_timer(unsigned long data)
-{
- struct ath_softc *sc = (struct ath_softc *) data;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_btcoex *btcoex = &sc->btcoex;
-
- ath_detect_bt_priority(sc);
-
- spin_lock_bh(&btcoex->btcoex_lock);
-
- ath9k_btcoex_bt_stomp(sc, btcoex->bt_stomp_type);
-
- spin_unlock_bh(&btcoex->btcoex_lock);
-
- if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
- if (btcoex->hw_timer_enabled)
- ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
-
- ath9k_gen_timer_start(ah,
- btcoex->no_stomp_timer,
- (ath9k_hw_gettsf32(ah) +
- btcoex->btcoex_no_stomp),
- btcoex->btcoex_no_stomp * 10);
- btcoex->hw_timer_enabled = true;
- }
-
- mod_timer(&btcoex->period_timer, jiffies +
- msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
-}
-
-/*
- * Generic tsf based hw timer which configures weight
- * registers to time slice between wlan and bt traffic
- */
-static void ath_btcoex_no_stomp_timer(void *arg)
-{
- struct ath_softc *sc = (struct ath_softc *)arg;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_btcoex *btcoex = &sc->btcoex;
-
- ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "no stomp timer running \n");
-
- spin_lock_bh(&btcoex->btcoex_lock);
-
- if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW)
- ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
- else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
- ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);
-
- spin_unlock_bh(&btcoex->btcoex_lock);
-}
-
-static int ath_init_btcoex_timer(struct ath_softc *sc)
-{
- struct ath_btcoex *btcoex = &sc->btcoex;
-
- btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
- btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
- btcoex->btcoex_period / 100;
-
- setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
- (unsigned long) sc);
-
- spin_lock_init(&btcoex->btcoex_lock);
-
- btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
- ath_btcoex_no_stomp_timer,
- ath_btcoex_no_stomp_timer,
- (void *) sc, AR_FIRST_NDP_TIMER);
-
- if (!btcoex->no_stomp_timer)
- return -ENOMEM;
-
- return 0;
-}
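
Compile-alone arithmetic behind ath_init_btcoex_timer() above, using the 45 ms period and 55% BT duty cycle described in the comment on ath_btcoex_period_timer(); the constants are taken from that comment rather than from btcoex.h.

/* Illustrative only -- not part of the patch. */
#include <stdio.h>

int main(void)
{
	unsigned int bt_period_ms = 45;		/* from the comment above */
	unsigned int bt_duty_cycle = 55;	/* BT share, in percent */
	unsigned int period_us = bt_period_ms * 1000;
	unsigned int no_stomp_us = (100 - bt_duty_cycle) * period_us / 100;

	/* the "no stomp" window is the WLAN share of each period */
	printf("period %u us, wlan window %u us (%u%%)\n",
	       period_us, no_stomp_us, 100 - bt_duty_cycle);
	return 0;
}
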
-
-/*
- * Read and write, they both share the same lock. We do this to serialize
- * reads and writes on Atheros 802.11n PCI devices only. This is required
- * as the FIFO on these devices can only accept sanely 2 requests. After
- * that the device goes bananas. Serializing the reads/writes prevents this
- * from happening.
- */
-
-static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
-{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
-
- if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
- unsigned long flags;
- spin_lock_irqsave(&sc->sc_serial_rw, flags);
- iowrite32(val, sc->mem + reg_offset);
- spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
- } else
- iowrite32(val, sc->mem + reg_offset);
-}
-
-static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
-{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- u32 val;
-
- if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
- unsigned long flags;
- spin_lock_irqsave(&sc->sc_serial_rw, flags);
- val = ioread32(sc->mem + reg_offset);
- spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
- } else
- val = ioread32(sc->mem + reg_offset);
- return val;
-}
-
-static const struct ath_ops ath9k_common_ops = {
- .read = ath9k_ioread32,
- .write = ath9k_iowrite32,
-};
-
-/*
- * Initialize and fill ath_softc, ath_sofct is the
- * "Software Carrier" struct. Historically it has existed
- * to allow the separation between hardware specific
- * variables (now in ath_hw) and driver specific variables.
- */
-static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
- const struct ath_bus_ops *bus_ops)
-{
- struct ath_hw *ah = NULL;
- struct ath_common *common;
- int r = 0, i;
- int csz = 0;
- int qnum;
-
- /* XXX: hardware will not be ready until ath_open() being called */
- sc->sc_flags |= SC_OP_INVALID;
-
- spin_lock_init(&sc->wiphy_lock);
- spin_lock_init(&sc->sc_resetlock);
- spin_lock_init(&sc->sc_serial_rw);
- spin_lock_init(&sc->ani_lock);
- spin_lock_init(&sc->sc_pm_lock);
- mutex_init(&sc->mutex);
- tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
- tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
- (unsigned long)sc);
-
- ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
- if (!ah)
- return -ENOMEM;
-
- ah->hw_version.devid = devid;
- ah->hw_version.subsysid = subsysid;
- sc->sc_ah = ah;
-
- common = ath9k_hw_common(ah);
- common->ops = &ath9k_common_ops;
- common->bus_ops = bus_ops;
- common->ah = ah;
- common->hw = sc->hw;
- common->priv = sc;
- common->debug_mask = ath9k_debug;
-
- /*
- * Cache line size is used to size and align various
- * structures used to communicate with the hardware.
- */
- ath_read_cachesize(common, &csz);
- /* XXX assert csz is non-zero */
- common->cachelsz = csz << 2; /* convert to bytes */
-
- r = ath9k_hw_init(ah);
- if (r) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to initialize hardware; "
- "initialization status: %d\n", r);
- goto bad_free_hw;
- }
-
- if (ath9k_init_debug(ah) < 0) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to create debugfs files\n");
- goto bad_free_hw;
- }
-
- /* Get the hardware key cache size. */
- common->keymax = ah->caps.keycache_size;
- if (common->keymax > ATH_KEYMAX) {
- ath_print(common, ATH_DBG_ANY,
- "Warning, using only %u entries in %u key cache\n",
- ATH_KEYMAX, common->keymax);
- common->keymax = ATH_KEYMAX;
- }
-
- /*
- * Reset the key cache since some parts do not
- * reset the contents on initial power up.
- */
- for (i = 0; i < common->keymax; i++)
- ath9k_hw_keyreset(ah, (u16) i);
-
- /* default to MONITOR mode */
- sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
-
- /*
- * Allocate hardware transmit queues: one queue for
- * beacon frames and one data queue for each QoS
- * priority. Note that the hal handles reseting
- * these queues at the needed time.
- */
- sc->beacon.beaconq = ath9k_hw_beaconq_setup(ah);
- if (sc->beacon.beaconq == -1) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup a beacon xmit queue\n");
- r = -EIO;
- goto bad2;
- }
- sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
- if (sc->beacon.cabq == NULL) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup CAB xmit queue\n");
- r = -EIO;
- goto bad2;
- }
-
- sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
- ath_cabq_update(sc);
-
- for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
- sc->tx.hwq_map[i] = -1;
-
- /* Setup data queues */
- /* NB: ensure BK queue is the lowest priority h/w queue */
- if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for BK traffic\n");
- r = -EIO;
- goto bad2;
- }
-
- if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for BE traffic\n");
- r = -EIO;
- goto bad2;
- }
- if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for VI traffic\n");
- r = -EIO;
- goto bad2;
- }
- if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for VO traffic\n");
- r = -EIO;
- goto bad2;
- }
-
- /* Initializes the noise floor to a reasonable default value.
- * Later on this will be updated during ANI processing. */
-
- common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
- setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
-
- if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_TKIP, NULL)) {
- /*
- * Whether we should enable h/w TKIP MIC.
- * XXX: if we don't support WME TKIP MIC, then we wouldn't
- * report WMM capable, so it's always safe to turn on
- * TKIP MIC in this case.
- */
- ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
- 0, 1, NULL);
- }
-
- /*
- * Check whether the separate key cache entries
- * are required to handle both tx+rx MIC keys.
- * With split mic keys the number of stations is limited
- * to 27 otherwise 59.
- */
- if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_TKIP, NULL)
- && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_MIC, NULL)
- && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
- 0, NULL))
- common->splitmic = 1;
-
- /* turn on mcast key search if possible */
- if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
- (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
- 1, NULL);
-
- sc->config.txpowlimit = ATH_TXPOWER_MAX;
-
- /* 11n Capabilities */
- if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
- sc->sc_flags |= SC_OP_TXAGGR;
- sc->sc_flags |= SC_OP_RXAGGR;
- }
-
- common->tx_chainmask = ah->caps.tx_chainmask;
- common->rx_chainmask = ah->caps.rx_chainmask;
-
- ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
- sc->rx.defant = ath9k_hw_getdefantenna(ah);
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
- memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
-
- sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
-
- /* initialize beacon slots */
- for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
- sc->beacon.bslot[i] = NULL;
- sc->beacon.bslot_aphy[i] = NULL;
- }
-
- /* setup channels and rates */
-
- if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
- sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
- sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
- sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
- ARRAY_SIZE(ath9k_2ghz_chantable);
- sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
- sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
- ARRAY_SIZE(ath9k_legacy_rates);
- }
-
- if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
- sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
- sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
- sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
- ARRAY_SIZE(ath9k_5ghz_chantable);
- sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
- ath9k_legacy_rates + 4;
- sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
- ARRAY_SIZE(ath9k_legacy_rates) - 4;
- }
-
- switch (ah->btcoex_hw.scheme) {
- case ATH_BTCOEX_CFG_NONE:
- break;
- case ATH_BTCOEX_CFG_2WIRE:
- ath9k_hw_btcoex_init_2wire(ah);
- break;
- case ATH_BTCOEX_CFG_3WIRE:
- ath9k_hw_btcoex_init_3wire(ah);
- r = ath_init_btcoex_timer(sc);
- if (r)
- goto bad2;
- qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
- ath9k_hw_init_btcoex_hw(ah, qnum);
- sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- return 0;
-bad2:
- /* cleanup tx queues */
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
- if (ATH_TXQ_SETUP(sc, i))
- ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-
-bad_free_hw:
- ath9k_uninit_hw(sc);
- return r;
-}
-
-void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
-{
- hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_SPECTRUM_MGMT;
-
- if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
- hw->flags |= IEEE80211_HW_MFP_CAPABLE;
-
- hw->wiphy->interface_modes =
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT);
-
- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
- hw->queues = 4;
- hw->max_rates = 4;
- hw->channel_change_time = 5000;
- hw->max_listen_interval = 10;
- /* Hardware supports 10 but we use 4 */
- hw->max_rate_tries = 4;
- hw->sta_data_size = sizeof(struct ath_node);
- hw->vif_data_size = sizeof(struct ath_vif);
-
- hw->rate_control_algorithm = "ath9k_rate_control";
-
- if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &sc->sbands[IEEE80211_BAND_2GHZ];
- if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &sc->sbands[IEEE80211_BAND_5GHZ];
-}
-
-/* Device driver core initialization */
-int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
- const struct ath_bus_ops *bus_ops)
-{
- struct ieee80211_hw *hw = sc->hw;
- struct ath_common *common;
- struct ath_hw *ah;
- int error = 0, i;
- struct ath_regulatory *reg;
-
- dev_dbg(sc->dev, "Attach ATH hw\n");
-
- error = ath_init_softc(devid, sc, subsysid, bus_ops);
- if (error != 0)
- return error;
-
- ah = sc->sc_ah;
- common = ath9k_hw_common(ah);
-
- /* get mac address from hardware and set in mac80211 */
-
- SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
-
- ath_set_hw_capab(sc, hw);
-
- error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
- ath9k_reg_notifier);
- if (error)
- return error;
-
- reg = &common->regulatory;
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
- if (test_bit(ATH9K_MODE_11G, ah->caps.wireless_modes))
- setup_ht_cap(sc,
- &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
- if (test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes))
- setup_ht_cap(sc,
- &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
- }
-
- /* initialize tx/rx engine */
- error = ath_tx_init(sc, ATH_TXBUF);
- if (error != 0)
- goto error_attach;
-
- error = ath_rx_init(sc, ATH_RXBUF);
- if (error != 0)
- goto error_attach;
-
- INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
- INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
- sc->wiphy_scheduler_int = msecs_to_jiffies(500);
-
- error = ieee80211_register_hw(hw);
-
- if (!ath_is_world_regd(reg)) {
- error = regulatory_hint(hw->wiphy, reg->alpha2);
- if (error)
- goto error_attach;
- }
-
- /* Initialize LED control */
- ath_init_leds(sc);
-
- ath_start_rfkill_poll(sc);
-
- return 0;
-
-error_attach:
- /* cleanup tx queues */
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
- if (ATH_TXQ_SETUP(sc, i))
- ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-
- ath9k_uninit_hw(sc);
-
- return error;
-}
-
int ath_reset(struct ath_softc *sc, bool retry_tx)
{
struct ath_hw *ah = sc->sc_ah;
@@ -1976,6 +947,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
/* Stop ANI */
del_timer_sync(&common->ani.timer);
+ ieee80211_stop_queues(hw);
+
ath9k_hw_set_interrupts(ah, 0);
ath_drain_all_txq(sc, retry_tx);
ath_stoprecv(sc);
@@ -2017,131 +990,14 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
}
}
+ ieee80211_wake_queues(hw);
+
/* Start ANI */
ath_start_ani(common);
return r;
}
-/*
- * This function will allocate both the DMA descriptor structure, and the
- * buffers it contains. These are used to contain the descriptors used
- * by the system.
-*/
-int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
- struct list_head *head, const char *name,
- int nbuf, int ndesc)
-{
-#define DS2PHYS(_dd, _ds) \
- ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
-#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
-#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_desc *ds;
- struct ath_buf *bf;
- int i, bsize, error;
-
- ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
- name, nbuf, ndesc);
-
- INIT_LIST_HEAD(head);
- /* ath_desc must be a multiple of DWORDs */
- if ((sizeof(struct ath_desc) % 4) != 0) {
- ath_print(common, ATH_DBG_FATAL,
- "ath_desc not DWORD aligned\n");
- BUG_ON((sizeof(struct ath_desc) % 4) != 0);
- error = -ENOMEM;
- goto fail;
- }
-
- dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
-
- /*
- * Need additional DMA memory because we can't use
- * descriptors that cross the 4K page boundary. Assume
- * one skipped descriptor per 4K page.
- */
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
- u32 ndesc_skipped =
- ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
- u32 dma_len;
-
- while (ndesc_skipped) {
- dma_len = ndesc_skipped * sizeof(struct ath_desc);
- dd->dd_desc_len += dma_len;
-
- ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
- };
- }
-
- /* allocate descriptors */
- dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
- &dd->dd_desc_paddr, GFP_KERNEL);
- if (dd->dd_desc == NULL) {
- error = -ENOMEM;
- goto fail;
- }
- ds = dd->dd_desc;
- ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
- name, ds, (u32) dd->dd_desc_len,
- ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
-
- /* allocate buffers */
- bsize = sizeof(struct ath_buf) * nbuf;
- bf = kzalloc(bsize, GFP_KERNEL);
- if (bf == NULL) {
- error = -ENOMEM;
- goto fail2;
- }
- dd->dd_bufptr = bf;
-
- for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
- bf->bf_desc = ds;
- bf->bf_daddr = DS2PHYS(dd, ds);
-
- if (!(sc->sc_ah->caps.hw_caps &
- ATH9K_HW_CAP_4KB_SPLITTRANS)) {
- /*
- * Skip descriptor addresses which can cause 4KB
- * boundary crossing (addr + length) with a 32 dword
- * descriptor fetch.
- */
- while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
- BUG_ON((caddr_t) bf->bf_desc >=
- ((caddr_t) dd->dd_desc +
- dd->dd_desc_len));
-
- ds += ndesc;
- bf->bf_desc = ds;
- bf->bf_daddr = DS2PHYS(dd, ds);
- }
- }
- list_add_tail(&bf->list, head);
- }
- return 0;
-fail2:
- dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
- dd->dd_desc_paddr);
-fail:
- memset(dd, 0, sizeof(*dd));
- return error;
-#undef ATH_DESC_4KB_BOUND_CHECK
-#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
-#undef DS2PHYS
-}
-
-void ath_descdma_cleanup(struct ath_softc *sc,
- struct ath_descdma *dd,
- struct list_head *head)
-{
- dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
- dd->dd_desc_paddr);
-
- INIT_LIST_HEAD(head);
- kfree(dd->dd_bufptr);
- memset(dd, 0, sizeof(*dd));
-}
-
int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
{
int qnum;
@@ -2220,28 +1076,6 @@ void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
/* mac80211 callbacks */
/**********************/
-/*
- * (Re)start btcoex timers
- */
-static void ath9k_btcoex_timer_resume(struct ath_softc *sc)
-{
- struct ath_btcoex *btcoex = &sc->btcoex;
- struct ath_hw *ah = sc->sc_ah;
-
- ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Starting btcoex timers");
-
- /* make sure duty cycle timer is also stopped when resuming */
- if (btcoex->hw_timer_enabled)
- ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
-
- btcoex->bt_priority_cnt = 0;
- btcoex->bt_priority_time = jiffies;
- sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
-
- mod_timer(&btcoex->period_timer, jiffies);
-}
-
static int ath9k_start(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
@@ -2411,11 +1245,11 @@ static int ath9k_tx(struct ieee80211_hw *hw,
if (ieee80211_is_pspoll(hdr->frame_control)) {
ath_print(common, ATH_DBG_PS,
"Sending PS-Poll to pick a buffered frame\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_PSPOLL_DATA;
+ sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA;
} else {
ath_print(common, ATH_DBG_PS,
"Wake up to complete TX\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_TX_ACK;
+ sc->ps_flags |= PS_WAIT_FOR_TX_ACK;
}
/*
* The actual restore operation will happen only after
@@ -2468,22 +1302,6 @@ exit:
return 0;
}
-/*
- * Pause btcoex timer and bt duty cycle timer
- */
-static void ath9k_btcoex_timer_pause(struct ath_softc *sc)
-{
- struct ath_btcoex *btcoex = &sc->btcoex;
- struct ath_hw *ah = sc->sc_ah;
-
- del_timer_sync(&btcoex->period_timer);
-
- if (btcoex->hw_timer_enabled)
- ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
-
- btcoex->hw_timer_enabled = false;
-}
-
static void ath9k_stop(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
@@ -2550,12 +1368,12 @@ static void ath9k_stop(struct ieee80211_hw *hw)
}
static int ath9k_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_vif *avp = (void *)conf->vif->drv_priv;
+ struct ath_vif *avp = (void *)vif->drv_priv;
enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
int ret = 0;
@@ -2567,7 +1385,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
goto out;
}
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
ic_opmode = NL80211_IFTYPE_STATION;
break;
@@ -2578,11 +1396,11 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
ret = -ENOBUFS;
goto out;
}
- ic_opmode = conf->type;
+ ic_opmode = vif->type;
break;
default:
ath_print(common, ATH_DBG_FATAL,
- "Interface type %d not yet supported\n", conf->type);
+ "Interface type %d not yet supported\n", vif->type);
ret = -EOPNOTSUPP;
goto out;
}
@@ -2614,18 +1432,18 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
* Enable MIB interrupts when there are hardware phy counters.
* Note we only do this (at the moment) for station mode.
*/
- if ((conf->type == NL80211_IFTYPE_STATION) ||
- (conf->type == NL80211_IFTYPE_ADHOC) ||
- (conf->type == NL80211_IFTYPE_MESH_POINT)) {
+ if ((vif->type == NL80211_IFTYPE_STATION) ||
+ (vif->type == NL80211_IFTYPE_ADHOC) ||
+ (vif->type == NL80211_IFTYPE_MESH_POINT)) {
sc->imask |= ATH9K_INT_MIB;
sc->imask |= ATH9K_INT_TSFOOR;
}
ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
- if (conf->type == NL80211_IFTYPE_AP ||
- conf->type == NL80211_IFTYPE_ADHOC ||
- conf->type == NL80211_IFTYPE_MONITOR)
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MONITOR)
ath_start_ani(common);
out:
@@ -2634,12 +1452,12 @@ out:
}
static void ath9k_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_vif *avp = (void *)conf->vif->drv_priv;
+ struct ath_vif *avp = (void *)vif->drv_priv;
int i;
ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
@@ -2662,7 +1480,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
sc->sc_flags &= ~SC_OP_BEACONS;
for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
- if (sc->beacon.bslot[i] == conf->vif) {
+ if (sc->beacon.bslot[i] == vif) {
printk(KERN_DEBUG "%s: vif had allocated beacon "
"slot\n", __func__);
sc->beacon.bslot[i] = NULL;
@@ -2675,6 +1493,19 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&sc->mutex);
}
+void ath9k_enable_ps(struct ath_softc *sc)
+{
+ sc->ps_enabled = true;
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
+ sc->imask |= ATH9K_INT_TIM_TIMER;
+ ath9k_hw_set_interrupts(sc->sc_ah,
+ sc->imask);
+ }
+ }
+ ath9k_hw_setrxabort(sc->sc_ah, 1);
+}
+
static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath_wiphy *aphy = hw->priv;
@@ -2713,6 +1544,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
spin_unlock_bh(&sc->wiphy_lock);
if (enable_radio) {
+ sc->ps_idle = false;
ath_radio_enable(sc, hw);
ath_print(common, ATH_DBG_CONFIG,
"not-idle: enabling radio\n");
@@ -2727,36 +1559,27 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
*/
if (changed & IEEE80211_CONF_CHANGE_PS) {
if (conf->flags & IEEE80211_CONF_PS) {
- sc->sc_flags |= SC_OP_PS_ENABLED;
- if (!(ah->caps.hw_caps &
- ATH9K_HW_CAP_AUTOSLEEP)) {
- if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
- sc->imask |= ATH9K_INT_TIM_TIMER;
- ath9k_hw_set_interrupts(sc->sc_ah,
- sc->imask);
- }
- }
+ sc->ps_flags |= PS_ENABLED;
/*
* At this point we know hardware has received an ACK
* of a previously sent null data frame.
*/
- if ((sc->sc_flags & SC_OP_NULLFUNC_COMPLETED)) {
- sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
- sc->ps_enabled = true;
- ath9k_hw_setrxabort(sc->sc_ah, 1);
+ if ((sc->ps_flags & PS_NULLFUNC_COMPLETED)) {
+ sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
+ ath9k_enable_ps(sc);
}
} else {
sc->ps_enabled = false;
- sc->sc_flags &= ~(SC_OP_PS_ENABLED |
- SC_OP_NULLFUNC_COMPLETED);
+ sc->ps_flags &= ~(PS_ENABLED |
+ PS_NULLFUNC_COMPLETED);
ath9k_setpower(sc, ATH9K_PM_AWAKE);
if (!(ah->caps.hw_caps &
ATH9K_HW_CAP_AUTOSLEEP)) {
ath9k_hw_setrxabort(sc->sc_ah, 0);
- sc->sc_flags &= ~(SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA |
- SC_OP_WAIT_FOR_TX_ACK);
+ sc->ps_flags &= ~(PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK);
if (sc->imask & ATH9K_INT_TIM_TIMER) {
sc->imask &= ~ATH9K_INT_TIM_TIMER;
ath9k_hw_set_interrupts(sc->sc_ah,
@@ -2766,6 +1589,14 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
}
}
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (conf->flags & IEEE80211_CONF_MONITOR) {
+ ath_print(common, ATH_DBG_CONFIG,
+ "HW opmode set to Monitor mode\n");
+ sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
+ }
+ }
+
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
struct ieee80211_channel *curchan = hw->conf.channel;
int pos = curchan->hw_value;
@@ -2801,8 +1632,10 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
}
skip_chan_change:
- if (changed & IEEE80211_CONF_CHANGE_POWER)
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
sc->config.txpowlimit = 2 * conf->power_level;
+ ath_update_txpow(sc);
+ }
spin_lock_bh(&sc->wiphy_lock);
disable_radio = ath9k_all_wiphys_idle(sc);
@@ -2810,6 +1643,7 @@ skip_chan_change:
if (disable_radio) {
ath_print(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
+ sc->ps_idle = true;
ath_radio_disable(sc, hw);
}
@@ -2966,6 +1800,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_vif *avp = (void *)vif->drv_priv;
+ int slottime;
int error;
mutex_lock(&sc->mutex);
@@ -3001,6 +1836,25 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
ath_beacon_config(sc, vif);
}
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ if (bss_conf->use_short_slot)
+ slottime = 9;
+ else
+ slottime = 20;
+ if (vif->type == NL80211_IFTYPE_AP) {
+ /*
+ * Defer update, so that connected stations can adjust
+ * their settings at the same time.
+ * See beacon.c for more details
+ */
+ sc->beacon.slottime = slottime;
+ sc->beacon.updateslot = UPDATE;
+ } else {
+ ah->slottime = slottime;
+ ath9k_hw_init_global_settings(ah);
+ }
+ }
+
/* Disable transmission of beacons */
if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon)
ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
@@ -3133,6 +1987,7 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
mutex_lock(&sc->mutex);
if (ath9k_wiphy_scanning(sc)) {
@@ -3148,10 +2003,9 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
aphy->state = ATH_WIPHY_SCAN;
ath9k_wiphy_pause_all_forced(sc, aphy);
-
- spin_lock_bh(&sc->ani_lock);
sc->sc_flags |= SC_OP_SCANNING;
- spin_unlock_bh(&sc->ani_lock);
+ del_timer_sync(&common->ani.timer);
+ cancel_delayed_work_sync(&sc->tx_complete_work);
mutex_unlock(&sc->mutex);
}
@@ -3159,17 +2013,30 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
mutex_lock(&sc->mutex);
- spin_lock_bh(&sc->ani_lock);
aphy->state = ATH_WIPHY_ACTIVE;
sc->sc_flags &= ~SC_OP_SCANNING;
sc->sc_flags |= SC_OP_FULL_RESET;
- spin_unlock_bh(&sc->ani_lock);
+ ath_start_ani(common);
+ ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
ath_beacon_config(sc, NULL);
mutex_unlock(&sc->mutex);
}
+static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
+{
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+ struct ath_hw *ah = sc->sc_ah;
+
+ mutex_lock(&sc->mutex);
+ ah->coverage_class = coverage_class;
+ ath9k_hw_init_global_settings(ah);
+ mutex_unlock(&sc->mutex);
+}
+
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
.start = ath9k_start,
@@ -3189,64 +2056,5 @@ struct ieee80211_ops ath9k_ops = {
.sw_scan_start = ath9k_sw_scan_start,
.sw_scan_complete = ath9k_sw_scan_complete,
.rfkill_poll = ath9k_rfkill_poll_state,
+ .set_coverage_class = ath9k_set_coverage_class,
};
-
-static int __init ath9k_init(void)
-{
- int error;
-
- /* Register rate control algorithm */
- error = ath_rate_control_register();
- if (error != 0) {
- printk(KERN_ERR
- "ath9k: Unable to register rate control "
- "algorithm: %d\n",
- error);
- goto err_out;
- }
-
- error = ath9k_debug_create_root();
- if (error) {
- printk(KERN_ERR
- "ath9k: Unable to create debugfs root: %d\n",
- error);
- goto err_rate_unregister;
- }
-
- error = ath_pci_init();
- if (error < 0) {
- printk(KERN_ERR
- "ath9k: No PCI devices found, driver not installed.\n");
- error = -ENODEV;
- goto err_remove_root;
- }
-
- error = ath_ahb_init();
- if (error < 0) {
- error = -ENODEV;
- goto err_pci_exit;
- }
-
- return 0;
-
- err_pci_exit:
- ath_pci_exit();
-
- err_remove_root:
- ath9k_debug_remove_root();
- err_rate_unregister:
- ath_rate_control_unregister();
- err_out:
- return error;
-}
-module_init(ath9k_init);
-
-static void __exit ath9k_exit(void)
-{
- ath_ahb_exit();
- ath_pci_exit();
- ath9k_debug_remove_root();
- ath_rate_control_unregister();
- printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
-}
-module_exit(ath9k_exit);
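The ath9k hunks above replace the old SC_OP_PS_* bits in sc->sc_flags with a dedicated ps_flags word (PS_ENABLED, PS_NULLFUNC_COMPLETED, PS_WAIT_FOR_*, printed with %lx in the debug paths) and factor the sleep entry into ath9k_enable_ps(), which may only run once the AP has ACKed the previously sent null-data frame. The stand-alone sketch below mirrors that handshake; the flag values, variable names and messages are illustrative stand-ins, not the driver's real definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's PS_* bits (values assumed). */
#define PS_ENABLED              (1UL << 0)
#define PS_NULLFUNC_COMPLETED   (1UL << 1)

static unsigned long ps_flags;

/* Called when mac80211 requests power save (IEEE80211_CONF_PS set). */
static void request_ps(void)
{
	ps_flags |= PS_ENABLED;
	/* Sleep only once the AP has already ACKed our null-data frame. */
	if (ps_flags & PS_NULLFUNC_COMPLETED) {
		ps_flags &= ~PS_NULLFUNC_COMPLETED;
		printf("enable PS: abort RX, arm TIM timer\n");
	}
}

/* Called from the TX completion path when the null-data frame is ACKed. */
static void nullfunc_acked(void)
{
	if (ps_flags & PS_ENABLED)
		printf("enable PS now (ACK arrived after the request)\n");
	else
		ps_flags |= PS_NULLFUNC_COMPLETED;
}

int main(void)
{
	nullfunc_acked();   /* ACK arrives first ...                   */
	request_ps();       /* ... then the PS request: sleep at once. */
	return 0;
}

Whichever event happens second triggers the actual sleep, which is exactly how ath9k_config() and ath_tx_processq() cooperate in the hunks above.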
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index f7af5ea54753..9441c6718a30 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -18,13 +18,14 @@
#include <linux/pci.h>
#include "ath9k.h"
-static struct pci_device_id ath_pci_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
{ PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
{ PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */
+ { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
{ PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
{ 0 }
@@ -49,16 +50,6 @@ static void ath_pci_read_cachesize(struct ath_common *common, int *csz)
*csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
}
-static void ath_pci_cleanup(struct ath_common *common)
-{
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- struct pci_dev *pdev = to_pci_dev(sc->dev);
-
- pci_iounmap(pdev, sc->mem);
- pci_disable_device(pdev);
- pci_release_region(pdev, 0);
-}
-
static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
struct ath_hw *ah = (struct ath_hw *) common->ah;
@@ -98,7 +89,6 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
static const struct ath_bus_ops ath_pci_bus_ops = {
.read_cachesize = ath_pci_read_cachesize,
- .cleanup = ath_pci_cleanup,
.eeprom_read = ath_pci_eeprom_read,
.bt_coex_prep = ath_pci_bt_coex_prep,
};
@@ -113,25 +103,22 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
u16 subsysid;
u32 val;
int ret = 0;
- struct ath_hw *ah;
char hw_name[64];
if (pci_enable_device(pdev))
return -EIO;
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-
if (ret) {
printk(KERN_ERR "ath9k: 32-bit DMA not available\n");
- goto bad;
+ goto err_dma;
}
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-
if (ret) {
printk(KERN_ERR "ath9k: 32-bit DMA consistent "
"DMA enable failed\n");
- goto bad;
+ goto err_dma;
}
/*
@@ -171,22 +158,22 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret) {
dev_err(&pdev->dev, "PCI memory region reserve error\n");
ret = -ENODEV;
- goto bad;
+ goto err_region;
}
mem = pci_iomap(pdev, 0, 0);
if (!mem) {
printk(KERN_ERR "PCI memory map error\n") ;
ret = -EIO;
- goto bad1;
+ goto err_iomap;
}
hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) +
sizeof(struct ath_softc), &ath9k_ops);
if (!hw) {
- dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
+ dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
ret = -ENOMEM;
- goto bad2;
+ goto err_alloc_hw;
}
SET_IEEE80211_DEV(hw, &pdev->dev);
@@ -201,25 +188,25 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sc->dev = &pdev->dev;
sc->mem = mem;
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid);
- ret = ath_init_device(id->device, sc, subsysid, &ath_pci_bus_ops);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize device\n");
- goto bad3;
- }
-
- /* setup interrupt service routine */
+ /* Will be cleared in ath9k_start() */
+ sc->sc_flags |= SC_OP_INVALID;
ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
- goto bad4;
+ goto err_irq;
}
sc->irq = pdev->irq;
- ah = sc->sc_ah;
- ath9k_hw_name(ah, hw_name, sizeof(hw_name));
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid);
+ ret = ath9k_init_device(id->device, sc, subsysid, &ath_pci_bus_ops);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize device\n");
+ goto err_init;
+ }
+
+ ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
printk(KERN_INFO
"%s: %s mem=0x%lx, irq=%d\n",
wiphy_name(hw->wiphy),
@@ -227,15 +214,18 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
(unsigned long)mem, pdev->irq);
return 0;
-bad4:
- ath_detach(sc);
-bad3:
+
+err_init:
+ free_irq(sc->irq, sc);
+err_irq:
ieee80211_free_hw(hw);
-bad2:
+err_alloc_hw:
pci_iounmap(pdev, mem);
-bad1:
+err_iomap:
pci_release_region(pdev, 0);
-bad:
+err_region:
+ /* Nothing */
+err_dma:
pci_disable_device(pdev);
return ret;
}
@@ -245,8 +235,15 @@ static void ath_pci_remove(struct pci_dev *pdev)
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ void __iomem *mem = sc->mem;
+
+ ath9k_deinit_device(sc);
+ free_irq(sc->irq, sc);
+ ieee80211_free_hw(sc->hw);
- ath_cleanup(sc);
+ pci_iounmap(pdev, mem);
+ pci_disable_device(pdev);
+ pci_release_region(pdev, 0);
}
#ifdef CONFIG_PM
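ath_pci_probe() above renames the numbered bad/bad1/... labels to descriptive err_* labels, each of which undoes exactly the steps taken before the failing one, in reverse order. The same unwind shape in a self-contained form, with ordinary C library resources standing in for the PCI calls:

#include <stdio.h>
#include <stdlib.h>

/* Each acquire step gets a matching unwind label; a failure jumps to the
 * label that releases everything acquired before the failing step. */
static int probe_like(void)
{
	FILE *cfg;
	char *buf;
	int ret;

	cfg = fopen("/etc/hostname", "r");              /* step 1: acquire */
	if (!cfg) {
		ret = -1;
		goto err_open;
	}

	buf = malloc(4096);                             /* step 2: acquire */
	if (!buf) {
		ret = -2;
		goto err_alloc;
	}

	if (fread(buf, 1, 4096, cfg) == 0 && ferror(cfg)) {  /* step 3: use */
		ret = -3;
		goto err_read;
	}

	free(buf);
	fclose(cfg);
	return 0;

err_read:
	free(buf);
err_alloc:
	fclose(cfg);
err_open:
	return ret;
}

int main(void)
{
	return probe_like() ? EXIT_FAILURE : EXIT_SUCCESS;
}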
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 70fdb9d8db82..11968843c773 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -678,13 +678,13 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
* For Multi Rate Retry we use a different number of
* retry attempt counts. This ends up looking like this:
*
- * MRR[0] = 2
- * MRR[1] = 2
- * MRR[2] = 2
- * MRR[3] = 4
+ * MRR[0] = 4
+ * MRR[1] = 4
+ * MRR[2] = 4
+ * MRR[3] = 8
*
*/
- try_per_rate = sc->hw->max_rate_tries;
+ try_per_rate = 4;
rate_table = sc->cur_rate_table;
rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe);
@@ -714,7 +714,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
for ( ; i < 4; i++) {
/* Use twice the number of tries for the last MRR segment. */
if (i + 1 == 4)
- try_per_rate = 4;
+ try_per_rate = 8;
ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &nrix);
/* All other rates in the series have RTS enabled */
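The rc.c change above stops deriving the per-segment try count from max_rate_tries and fixes the comment to match the new behaviour: each of the four multi-rate-retry segments gets 4 tries, the last one 8, for a worst case of 20 transmissions per frame. A tiny sketch of how the per-segment counts come out:

#include <stdio.h>

int main(void)
{
	int tries[4];
	int i, total = 0;

	for (i = 0; i < 4; i++) {
		/* Double the tries for the last MRR segment. */
		tries[i] = (i == 3) ? 8 : 4;
		total += tries[i];
	}

	for (i = 0; i < 4; i++)
		printf("MRR[%d] = %d\n", i, tries[i]);
	printf("worst-case transmissions per frame: %d\n", total);
	return 0;
}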
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 9eb96f506998..4f6d6fd442f4 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -57,6 +57,10 @@ enum {
|| (_phy == WLAN_RC_PHY_HT_40_DS) \
|| (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
|| (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
+#define WLAN_RC_PHY_20(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS) \
+ || (_phy == WLAN_RC_PHY_HT_20_DS) \
+ || (_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
+ || (_phy == WLAN_RC_PHY_HT_20_DS_HGI))
#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \
|| (_phy == WLAN_RC_PHY_HT_40_DS) \
|| (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 477365e5ae69..1ca42e5148c8 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -364,10 +364,10 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
return; /* not from our current AP */
- sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;
+ sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
- if (sc->sc_flags & SC_OP_BEACON_SYNC) {
- sc->sc_flags &= ~SC_OP_BEACON_SYNC;
+ if (sc->ps_flags & PS_BEACON_SYNC) {
+ sc->ps_flags &= ~PS_BEACON_SYNC;
ath_print(common, ATH_DBG_PS,
"Reconfigure Beacon timers based on "
"timestamp from the AP\n");
@@ -384,17 +384,17 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
*/
ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
"buffered broadcast/multicast frame(s)\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON;
+ sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
return;
}
- if (sc->sc_flags & SC_OP_WAIT_FOR_CAB) {
+ if (sc->ps_flags & PS_WAIT_FOR_CAB) {
/*
* This can happen if a broadcast frame is dropped or the AP
* fails to send a frame indicating that all CAB frames have
* been delivered.
*/
- sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
+ sc->ps_flags &= ~PS_WAIT_FOR_CAB;
ath_print(common, ATH_DBG_PS,
"PS wait for CAB frames timed out\n");
}
@@ -408,10 +408,10 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
hdr = (struct ieee80211_hdr *)skb->data;
/* Process Beacon and CAB receive in PS state */
- if ((sc->sc_flags & SC_OP_WAIT_FOR_BEACON) &&
+ if ((sc->ps_flags & PS_WAIT_FOR_BEACON) &&
ieee80211_is_beacon(hdr->frame_control))
ath_rx_ps_beacon(sc, skb);
- else if ((sc->sc_flags & SC_OP_WAIT_FOR_CAB) &&
+ else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
(ieee80211_is_data(hdr->frame_control) ||
ieee80211_is_action(hdr->frame_control)) &&
is_multicast_ether_addr(hdr->addr1) &&
@@ -420,20 +420,20 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
* No more broadcast/multicast frames to be received at this
* point.
*/
- sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
+ sc->ps_flags &= ~PS_WAIT_FOR_CAB;
ath_print(common, ATH_DBG_PS,
"All PS CAB frames received, back to sleep\n");
- } else if ((sc->sc_flags & SC_OP_WAIT_FOR_PSPOLL_DATA) &&
+ } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
!is_multicast_ether_addr(hdr->addr1) &&
!ieee80211_has_morefrags(hdr->frame_control)) {
- sc->sc_flags &= ~SC_OP_WAIT_FOR_PSPOLL_DATA;
+ sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
ath_print(common, ATH_DBG_PS,
"Going back to sleep after having received "
- "PS-Poll data (0x%x)\n",
- sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA |
- SC_OP_WAIT_FOR_TX_ACK));
+ "PS-Poll data (0x%lx)\n",
+ sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK));
}
}
@@ -571,6 +571,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
hw = ath_get_virt_hw(sc, hdr);
rx_stats = &ds->ds_rxstat;
+ ath_debug_stat_rx(sc, bf);
+
/*
* If we're asked to flush receive queue, directly
* chain it back at the queue without processing it.
@@ -631,9 +633,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
sc->rx.rxotherant = 0;
}
- if (unlikely(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA)))
+ if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA)))
ath_rx_ps(sc, skb);
ath_rx_send_to_mac80211(hw, sc, skb, rxs);
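The recv.c hunk keeps the receive-path power-save state machine and only renames its bits: a DTIM beacon that advertises buffered traffic sets PS_WAIT_FOR_CAB, and the last buffered multicast frame (no "more data" bit) clears it so the chip can sleep again. A compact sketch of those transitions follows; the rx_frame fields are hypothetical and merely model the 802.11 header checks done in ath_rx_ps().

#include <stdbool.h>
#include <stdio.h>

#define PS_WAIT_FOR_BEACON  (1UL << 0)
#define PS_WAIT_FOR_CAB     (1UL << 1)

struct rx_frame {            /* hypothetical, for illustration only */
	bool is_beacon;
	bool dtim_buffered;  /* DTIM beacon says bc/mc traffic is queued */
	bool is_multicast_data;
	bool more_data;      /* "more data" bit in the frame control field */
};

static void ps_rx(unsigned long *ps_flags, const struct rx_frame *f)
{
	if ((*ps_flags & PS_WAIT_FOR_BEACON) && f->is_beacon) {
		*ps_flags &= ~PS_WAIT_FOR_BEACON;
		if (f->dtim_buffered)
			*ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
	} else if ((*ps_flags & PS_WAIT_FOR_CAB) &&
		   f->is_multicast_data && !f->more_data) {
		/* Last buffered frame after the DTIM: safe to sleep again. */
		*ps_flags &= ~PS_WAIT_FOR_CAB;
	}
}

int main(void)
{
	unsigned long flags = PS_WAIT_FOR_BEACON;
	struct rx_frame beacon = { .is_beacon = true, .dtim_buffered = true };
	struct rx_frame mcast  = { .is_multicast_data = true };

	ps_rx(&flags, &beacon);
	ps_rx(&flags, &mcast);
	printf("flags after CAB delivery: 0x%lx\n", flags);
	return 0;
}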
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 8e653fb937a1..72cfa8ebd9ae 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -1547,9 +1547,9 @@ enum {
#define AR_BT_COEX_WEIGHT 0x8174
#define AR_BT_COEX_WGHT 0xff55
-#define AR_STOMP_ALL_WLAN_WGHT 0xffcc
-#define AR_STOMP_LOW_WLAN_WGHT 0xaaa8
-#define AR_STOMP_NONE_WLAN_WGHT 0xaa00
+#define AR_STOMP_ALL_WLAN_WGHT 0xfcfc
+#define AR_STOMP_LOW_WLAN_WGHT 0xa8a8
+#define AR_STOMP_NONE_WLAN_WGHT 0x0000
#define AR_BTCOEX_BT_WGHT 0x0000ffff
#define AR_BTCOEX_BT_WGHT_S 0
#define AR_BTCOEX_WL_WGHT 0xffff0000
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index cd26caaf44e7..a43fbf84dab9 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -152,7 +152,7 @@ int ath9k_wiphy_add(struct ath_softc *sc)
SET_IEEE80211_PERM_ADDR(hw, addr);
- ath_set_hw_capab(sc, hw);
+ ath9k_set_hw_capab(sc, hw);
error = ieee80211_register_hw(hw);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 29bf33692f71..47294f90bbe5 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1498,26 +1498,6 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
ctsrate |= rate->hw_value_short;
- /*
- * ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive.
- * Check the first rate in the series to decide whether RTS/CTS
- * or CTS-to-self has to be used.
- */
- if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
- flags = ATH9K_TXDESC_CTSENA;
- else if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
- flags = ATH9K_TXDESC_RTSENA;
-
- /* FIXME: Handle aggregation protection */
- if (sc->config.ath_aggr_prot &&
- (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
- flags = ATH9K_TXDESC_RTSENA;
- }
-
- /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
- if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
- flags &= ~(ATH9K_TXDESC_RTSENA);
-
for (i = 0; i < 4; i++) {
bool is_40, is_sgi, is_sp;
int phy;
@@ -1529,8 +1509,15 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
series[i].Tries = rates[i].count;
series[i].ChSel = common->tx_chainmask;
- if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
+ (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
+ flags |= ATH9K_TXDESC_RTSENA;
+ } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
+ flags |= ATH9K_TXDESC_CTSENA;
+ }
+
if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
series[i].RateFlags |= ATH9K_RATESERIES_2040;
if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
@@ -1568,6 +1555,14 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
}
+ /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
+ if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
+ flags &= ~ATH9K_TXDESC_RTSENA;
+
+ /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
+ if (flags & ATH9K_TXDESC_RTSENA)
+ flags &= ~ATH9K_TXDESC_CTSENA;
+
/* set dur_update_en for l-sig computation except for PS-Poll frames */
ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
bf->bf_lastbf->bf_desc,
@@ -1648,7 +1643,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
/* tag if this is a nullfunc frame to enable PS when AP acks it */
if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
bf->bf_isnullfunc = true;
- sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
+ sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
} else
bf->bf_isnullfunc = false;
@@ -1858,15 +1853,15 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
skb_pull(skb, padsize);
}
- if (sc->sc_flags & SC_OP_WAIT_FOR_TX_ACK) {
- sc->sc_flags &= ~SC_OP_WAIT_FOR_TX_ACK;
+ if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
+ sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
ath_print(common, ATH_DBG_PS,
"Going back to sleep after having "
- "received TX status (0x%x)\n",
- sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA |
- SC_OP_WAIT_FOR_TX_ACK));
+ "received TX status (0x%lx)\n",
+ sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK));
}
if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
@@ -2053,11 +2048,10 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
*/
if (bf->bf_isnullfunc &&
(ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
- if ((sc->sc_flags & SC_OP_PS_ENABLED)) {
- sc->ps_enabled = true;
- ath9k_hw_setrxabort(sc->sc_ah, 1);
- } else
- sc->sc_flags |= SC_OP_NULLFUNC_COMPLETED;
+ if ((sc->ps_flags & PS_ENABLED))
+ ath9k_enable_ps(sc);
+ else
+ sc->ps_flags |= PS_NULLFUNC_COMPLETED;
}
/*
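The ath_buf_set_rate() hunk above moves protection handling into the per-rate loop: any rate that wants protection marks its series with RATESERIES_RTS_CTS, while the descriptor-wide RTSENA/CTSENA flags are accumulated and reconciled afterwards (RTS stripped for aggregates above the AR5416 8K limit, and RTS taking precedence because the two flags are mutually exclusive). The same accumulate-then-reconcile shape, stand-alone and with assumed flag values:

#include <stdbool.h>
#include <stdio.h>

#define TXDESC_RTSENA (1u << 0)
#define TXDESC_CTSENA (1u << 1)

struct rate { bool want_rts; bool want_cts_to_self; };

static unsigned build_prot_flags(const struct rate *rates, int n,
				 bool is_aggr, int aggr_len, int rts_aggr_limit)
{
	unsigned flags = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (rates[i].want_rts)
			flags |= TXDESC_RTSENA;
		else if (rates[i].want_cts_to_self)
			flags |= TXDESC_CTSENA;
	}

	/* RTS cannot protect an aggregate larger than the hardware limit. */
	if (is_aggr && aggr_len > rts_aggr_limit)
		flags &= ~TXDESC_RTSENA;

	/* RTS and CTS-to-self are mutually exclusive; RTS wins. */
	if (flags & TXDESC_RTSENA)
		flags &= ~TXDESC_CTSENA;

	return flags;
}

int main(void)
{
	struct rate rates[4] = {
		{ .want_rts = true }, { .want_cts_to_self = true }, {0}, {0}
	};

	printf("flags = 0x%x\n",
	       build_prot_flags(rates, 4, true, 4096, 8192));
	return 0;
}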
diff --git a/drivers/net/wireless/ath/debug.h b/drivers/net/wireless/ath/debug.h
index d6b685a06c5e..8263633c003c 100644
--- a/drivers/net/wireless/ath/debug.h
+++ b/drivers/net/wireless/ath/debug.h
@@ -65,11 +65,11 @@ enum ATH_DEBUG {
#define ATH_DBG_DEFAULT (ATH_DBG_FATAL)
#ifdef CONFIG_ATH_DEBUG
-void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...);
+void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
#else
-static inline void ath_print(struct ath_common *common,
- int dbg_mask,
- const char *fmt, ...)
+static inline void __attribute__ ((format (printf, 3, 4)))
+ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
{
}
#endif /* CONFIG_ATH_DEBUG */
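Annotating ath_print() with __attribute__((format(printf, 3, 4))) lets the compiler type-check the variadic arguments against the format string in both the real and the stubbed-out build; that is what turns mismatches such as printing the unsigned long ps_flags with "%x" (corrected to "%lx" elsewhere in this series) into -Wformat warnings. A minimal example of the attribute on a simpler two-argument wrapper, assumed here for illustration:

#include <stdarg.h>
#include <stdio.h>

/* Argument 1 is the format string, checking starts at argument 2. */
static void log_msg(const char *fmt, ...) __attribute__((format(printf, 1, 2)));

static void log_msg(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

int main(void)
{
	unsigned long flags = 0x15;

	log_msg("flags: 0x%lx\n", flags);   /* correct */
	/* log_msg("flags: 0x%x\n", flags); would trigger -Wformat */
	return 0;
}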
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 039ac490465c..04abd1f556b7 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -110,8 +110,9 @@ static const struct ieee80211_regdomain ath_world_regdom_67_68_6A = {
static inline bool is_wwr_sku(u16 regd)
{
- return ((regd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX) ||
- (regd == WORLD);
+ return ((regd & COUNTRY_ERD_FLAG) != COUNTRY_ERD_FLAG) &&
+ (((regd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX) ||
+ (regd == WORLD));
}
static u16 ath_regd_get_eepromRD(struct ath_regulatory *reg)
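The regd.c fix tightens is_wwr_sku(): an EEPROM regulatory value that carries the country-code flag must never be treated as a world-wide-roaming SKU, even if its low bits happen to match the WWR prefix. A sketch of the check follows; the constant values are assumptions chosen only to make the example compile, the real definitions live in regd.h and regd_common.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed values, for illustration only. */
#define COUNTRY_ERD_FLAG 0x8000
#define WORLD_SKU_MASK   0x00F0
#define WORLD_SKU_PREFIX 0x0060
#define WORLD            0x01FF

static bool is_wwr_sku(uint16_t regd)
{
	/* A country-code entry is never a world-roaming SKU. */
	if (regd & COUNTRY_ERD_FLAG)
		return false;

	return ((regd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX) ||
	       (regd == WORLD);
}

int main(void)
{
	printf("0x0067 -> %d\n", is_wwr_sku(0x0067));  /* WWR prefix: yes  */
	printf("0x8067 -> %d\n", is_wwr_sku(0x8067));  /* country code: no */
	return 0;
}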
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
index 92f87fbe750f..9ab1192004c0 100644
--- a/drivers/net/wireless/atmel_pci.c
+++ b/drivers/net/wireless/atmel_pci.c
@@ -31,7 +31,7 @@ MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.")
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards");
-static struct pci_device_id card_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
{ 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID },
{ 0, }
};
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 64c12e1bced3..073be566d05e 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -3,6 +3,7 @@ config B43
depends on SSB_POSSIBLE && MAC80211 && HAS_DMA
select SSB
select FW_LOADER
+ select SSB_BLOCKIO
---help---
b43 is a driver for the Broadcom 43xx series wireless devices.
@@ -78,14 +79,6 @@ config B43_SDIO
If unsure, say N.
-# Data transfers to the device via PIO
-# This is only needed on PCMCIA and SDIO devices. All others can do DMA properly.
-config B43_PIO
- bool
- depends on B43 && (B43_SDIO || B43_PCMCIA || B43_FORCE_PIO)
- select SSB_BLOCKIO
- default y
-
config B43_NPHY
bool "Pre IEEE 802.11n support (BROKEN)"
depends on B43 && EXPERIMENTAL && BROKEN
@@ -137,12 +130,4 @@ config B43_DEBUG
for production use.
Only say Y, if you are debugging a problem in the b43 driver sourcecode.
-config B43_FORCE_PIO
- bool "Force usage of PIO instead of DMA"
- depends on B43 && B43_DEBUG
- ---help---
- This will disable DMA and always enable PIO instead.
- Say N!
- This is only for debugging the PIO engine code. You do
- _NOT_ want to enable this.
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 84772a2542dc..5e83b6f0a3a0 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -12,7 +12,7 @@ b43-y += xmit.o
b43-y += lo.o
b43-y += wa.o
b43-y += dma.o
-b43-$(CONFIG_B43_PIO) += pio.o
+b43-y += pio.o
b43-y += rfkill.o
b43-$(CONFIG_B43_LEDS) += leds.o
b43-$(CONFIG_B43_PCMCIA) += pcmcia.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index c484cc253892..6a6ab0f630e5 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -254,6 +254,14 @@ enum {
#define B43_SHM_SH_MAXBFRAMES 0x0080 /* Maximum number of frames in a burst */
#define B43_SHM_SH_SPUWKUP 0x0094 /* pre-wakeup for synth PU in us */
#define B43_SHM_SH_PRETBTT 0x0096 /* pre-TBTT in us */
+/* SHM_SHARED tx iq workarounds */
+#define B43_SHM_SH_NPHY_TXIQW0 0x0700
+#define B43_SHM_SH_NPHY_TXIQW1 0x0702
+#define B43_SHM_SH_NPHY_TXIQW2 0x0704
+#define B43_SHM_SH_NPHY_TXIQW3 0x0706
+/* SHM_SHARED tx pwr ctrl */
+#define B43_SHM_SH_NPHY_TXPWR_INDX0 0x0708
+#define B43_SHM_SH_NPHY_TXPWR_INDX1 0x070E
/* SHM_SCRATCH offsets */
#define B43_SHM_SC_MINCONT 0x0003 /* Minimum contention window */
@@ -822,11 +830,9 @@ struct b43_wl {
/* The device LEDs. */
struct b43_leds leds;
-#ifdef CONFIG_B43_PIO
/* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */
u8 pio_scratchspace[110] __attribute__((__aligned__(8)));
u8 pio_tailspace[4] __attribute__((__aligned__(8)));
-#endif /* CONFIG_B43_PIO */
};
static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw)
@@ -877,20 +883,9 @@ static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value)
static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
{
-#ifdef CONFIG_B43_PIO
return dev->__using_pio_transfers;
-#else
- return 0;
-#endif
}
-#ifdef CONFIG_B43_FORCE_PIO
-# define B43_FORCE_PIO 1
-#else
-# define B43_FORCE_PIO 0
-#endif
-
-
/* Message printing */
void b43info(struct b43_wl *wl, const char *fmt, ...)
__attribute__ ((format(printf, 2, 3)));
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 88d1fd02d40a..be7abf8916ad 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1369,7 +1369,6 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
b43err(dev->wl, "DMA tx mapping failure\n");
goto out;
}
- ring->nr_tx_packets++;
if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
should_inject_overflow(ring)) {
/* This TX ring is full. */
@@ -1500,22 +1499,6 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
}
}
-void b43_dma_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- const int nr_queues = dev->wl->hw->queues;
- struct b43_dmaring *ring;
- int i;
-
- for (i = 0; i < nr_queues; i++) {
- ring = select_ring_by_priority(dev, i);
-
- stats[i].len = ring->used_slots / TX_SLOTS_PER_FRAME;
- stats[i].limit = ring->nr_slots / TX_SLOTS_PER_FRAME;
- stats[i].count = ring->nr_tx_packets;
- }
-}
-
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
const struct b43_dma_ops *ops = ring->ops;
@@ -1653,7 +1636,6 @@ void b43_dma_tx_resume(struct b43_wldev *dev)
b43_power_saving_ctl_bits(dev, 0);
}
-#ifdef CONFIG_B43_PIO
static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
u16 mmio_base, bool enable)
{
@@ -1687,4 +1669,3 @@ void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
mmio_base = b43_dmacontroller_base(type, engine_index);
direct_fifo_rx(dev, type, mmio_base, enable);
}
-#endif /* CONFIG_B43_PIO */
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index f7ab37c4cdbc..dc91944d6022 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -228,8 +228,6 @@ struct b43_dmaring {
int used_slots;
/* Currently used slot in the ring. */
int current_slot;
- /* Total number of packets sent. Statistics only. */
- unsigned int nr_tx_packets;
/* Frameoffset in octets. */
u32 frameoffset;
/* Descriptor buffer size. */
@@ -278,9 +276,6 @@ void b43_dma_free(struct b43_wldev *dev);
void b43_dma_tx_suspend(struct b43_wldev *dev);
void b43_dma_tx_resume(struct b43_wldev *dev);
-void b43_dma_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats);
-
int b43_dma_tx(struct b43_wldev *dev,
struct sk_buff *skb);
void b43_dma_handle_txstatus(struct b43_wldev *dev,
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 490fb45d1d05..aa33d741e5e6 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -67,7 +67,12 @@ MODULE_AUTHOR("Gábor Stefanik");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(B43_SUPPORTED_FIRMWARE_ID);
-
+MODULE_FIRMWARE("b43/ucode11.fw");
+MODULE_FIRMWARE("b43/ucode13.fw");
+MODULE_FIRMWARE("b43/ucode14.fw");
+MODULE_FIRMWARE("b43/ucode15.fw");
+MODULE_FIRMWARE("b43/ucode5.fw");
+MODULE_FIRMWARE("b43/ucode9.fw");
static int modparam_bad_frames_preempt;
module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444);
@@ -102,6 +107,9 @@ int b43_modparam_verbose = B43_VERBOSITY_DEFAULT;
module_param_named(verbose, b43_modparam_verbose, int, 0644);
MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug");
+static int modparam_pio;
+module_param_named(pio, modparam_pio, int, 0444);
+MODULE_PARM_DESC(pio, "enable(1) / disable(0) PIO mode");
static const struct ssb_device_id b43_ssb_tbl[] = {
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5),
@@ -110,6 +118,7 @@ static const struct ssb_device_id b43_ssb_tbl[] = {
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 9),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 10),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 11),
+ SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 12),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 13),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 15),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 16),
@@ -842,8 +851,10 @@ static void rx_tkip_phase1_write(struct b43_wldev *dev, u8 index, u32 iv32,
}
static void b43_op_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_key_conf *keyconf, const u8 *addr,
- u32 iv32, u16 *phase1key)
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev;
@@ -852,19 +863,19 @@ static void b43_op_update_tkip_key(struct ieee80211_hw *hw,
if (B43_WARN_ON(!modparam_hwtkip))
return;
- mutex_lock(&wl->mutex);
-
+ /* This is only called from the RX path through mac80211, where
+ * our mutex is already locked. */
+ B43_WARN_ON(!mutex_is_locked(&wl->mutex));
dev = wl->current_dev;
- if (!dev || b43_status(dev) < B43_STAT_INITIALIZED)
- goto out_unlock;
+ B43_WARN_ON(!dev || b43_status(dev) < B43_STAT_INITIALIZED);
keymac_write(dev, index, NULL); /* First zero out mac to avoid race */
rx_tkip_phase1_write(dev, index, iv32, phase1key);
- keymac_write(dev, index, addr);
-
-out_unlock:
- mutex_unlock(&wl->mutex);
+ /* only pairwise TKIP keys are supported right now */
+ if (WARN_ON(!sta))
+ return;
+ keymac_write(dev, index, sta->addr);
}
static void do_key_write(struct b43_wldev *dev,
@@ -1793,8 +1804,8 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
dma_reason[4], dma_reason[5]);
b43err(dev->wl, "This device does not support DMA "
"on your system. Please use PIO instead.\n");
- b43err(dev->wl, "CONFIG_B43_FORCE_PIO must be set in "
- "your kernel configuration.\n");
+ b43err(dev->wl, "Unload the b43 module and reload "
+ "with 'pio=1'\n");
return;
}
if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
@@ -3345,27 +3356,6 @@ out_unlock:
return err;
}
-static int b43_op_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct b43_wl *wl = hw_to_b43_wl(hw);
- struct b43_wldev *dev;
- int err = -ENODEV;
-
- mutex_lock(&wl->mutex);
- dev = wl->current_dev;
- if (dev && b43_status(dev) >= B43_STAT_STARTED) {
- if (b43_using_pio_transfers(dev))
- b43_pio_get_tx_stats(dev, stats);
- else
- b43_dma_get_tx_stats(dev, stats);
- err = 0;
- }
- mutex_unlock(&wl->mutex);
-
- return err;
-}
-
static int b43_op_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
@@ -3569,6 +3559,12 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
dev = wl->current_dev;
phy = &dev->phy;
+ if (conf_is_ht(conf))
+ phy->is_40mhz =
+ (conf_is_ht40_minus(conf) || conf_is_ht40_plus(conf));
+ else
+ phy->is_40mhz = false;
+
b43_mac_suspend(dev);
if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
@@ -3970,6 +3966,7 @@ static int b43_wireless_core_start(struct b43_wldev *dev)
}
/* We are ready to run. */
+ ieee80211_wake_queues(dev->wl->hw);
b43_set_status(dev, B43_STAT_STARTED);
/* Start data flow (TX/RX). */
@@ -4360,7 +4357,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) ||
(dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) ||
- B43_FORCE_PIO) {
+ modparam_pio) {
dev->__using_pio_transfers = 1;
err = b43_pio_init(dev);
} else {
@@ -4379,8 +4376,6 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
ieee80211_wake_queues(dev->wl->hw);
- ieee80211_wake_queues(dev->wl->hw);
-
b43_set_status(dev, B43_STAT_INITIALIZED);
out:
@@ -4395,7 +4390,7 @@ err_busdown:
}
static int b43_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev;
@@ -4403,24 +4398,24 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
/* TODO: allow WDS/AP devices to coexist */
- if (conf->type != NL80211_IFTYPE_AP &&
- conf->type != NL80211_IFTYPE_MESH_POINT &&
- conf->type != NL80211_IFTYPE_STATION &&
- conf->type != NL80211_IFTYPE_WDS &&
- conf->type != NL80211_IFTYPE_ADHOC)
+ if (vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_MESH_POINT &&
+ vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_WDS &&
+ vif->type != NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
mutex_lock(&wl->mutex);
if (wl->operating)
goto out_mutex_unlock;
- b43dbg(wl, "Adding Interface type %d\n", conf->type);
+ b43dbg(wl, "Adding Interface type %d\n", vif->type);
dev = wl->current_dev;
wl->operating = 1;
- wl->vif = conf->vif;
- wl->if_type = conf->type;
- memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN);
+ wl->vif = vif;
+ wl->if_type = vif->type;
+ memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
b43_adjust_opmode(dev);
b43_set_pretbtt(dev);
@@ -4435,17 +4430,17 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
}
static void b43_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev = wl->current_dev;
- b43dbg(wl, "Removing Interface type %d\n", conf->type);
+ b43dbg(wl, "Removing Interface type %d\n", vif->type);
mutex_lock(&wl->mutex);
B43_WARN_ON(!wl->operating);
- B43_WARN_ON(wl->vif != conf->vif);
+ B43_WARN_ON(wl->vif != vif);
wl->vif = NULL;
wl->operating = 0;
@@ -4586,7 +4581,6 @@ static const struct ieee80211_ops b43_hw_ops = {
.set_key = b43_op_set_key,
.update_tkip_key = b43_op_update_tkip_key,
.get_stats = b43_op_get_stats,
- .get_tx_stats = b43_op_get_tx_stats,
.get_tsf = b43_op_get_tsf,
.set_tsf = b43_op_set_tsf,
.start = b43_op_start,
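The b43 Kconfig and main.c changes above drop the compile-time CONFIG_B43_FORCE_PIO switch in favour of a runtime "pio" module parameter, so a user hitting the fatal-DMA error can reload with pio=1 instead of rebuilding the kernel. A minimal sketch of wiring such a parameter into a module; the module itself is hypothetical, only module_param_named()/MODULE_PARM_DESC() are the standard kernel interfaces.

#include <linux/init.h>
#include <linux/module.h>

static int use_pio;	/* 0 = DMA (default), 1 = PIO */
module_param_named(pio, use_pio, int, 0444);
MODULE_PARM_DESC(pio, "enable(1) / disable(0) PIO mode");

static int __init demo_init(void)
{
	pr_info("demo: using %s transfers\n", use_pio ? "PIO" : "DMA");
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Loading with "modprobe demo pio=1" then selects the PIO path at runtime, which is the behaviour the changed error message in b43_do_interrupt_thread() now points users at.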
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 75b26e175e8f..8f7d7eff2d80 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -421,3 +421,48 @@ void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on)
{
b43_write16(dev, B43_MMIO_PHY0, on ? 0 : 0xF4);
}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/Cordic */
+struct b43_c32 b43_cordic(int theta)
+{
+ u32 arctg[] = { 2949120, 1740967, 919879, 466945, 234379, 117304,
+ 58666, 29335, 14668, 7334, 3667, 1833, 917, 458,
+ 229, 115, 57, 29, };
+ u8 i;
+ s32 tmp;
+ s8 signx = 1;
+ u32 angle = 0;
+ struct b43_c32 ret = { .i = 39797, .q = 0, };
+
+ while (theta > (180 << 16))
+ theta -= (360 << 16);
+ while (theta < -(180 << 16))
+ theta += (360 << 16);
+
+ if (theta > (90 << 16)) {
+ theta -= (180 << 16);
+ signx = -1;
+ } else if (theta < -(90 << 16)) {
+ theta += (180 << 16);
+ signx = -1;
+ }
+
+ for (i = 0; i <= 17; i++) {
+ if (theta > angle) {
+ tmp = ret.i - (ret.q >> i);
+ ret.q += ret.i >> i;
+ ret.i = tmp;
+ angle += arctg[i];
+ } else {
+ tmp = ret.i + (ret.q >> i);
+ ret.q -= ret.i >> i;
+ ret.i = tmp;
+ angle -= arctg[i];
+ }
+ }
+
+ ret.i *= signx;
+ ret.q *= signx;
+
+ return ret;
+}
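b43_cordic() above is the fixed-point CORDIC rotator that phy_lp.c previously kept private: angles are degrees scaled by 2^16, the arctangent table is pre-scaled the same way (2949120 = 45 << 16), and the start value 39797, roughly 0.607 * 2^16, cancels the CORDIC gain so the result approximates cos/sin scaled by 2^16. The stand-alone version below keeps the angle accumulator signed and can be compiled with -lm to compare against libm; names are illustrative.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

struct c32 { int32_t i, q; };

static struct c32 cordic(int theta)
{
	static const uint32_t arctg[] = {
		2949120, 1740967, 919879, 466945, 234379, 117304,
		58666, 29335, 14668, 7334, 3667, 1833, 917, 458,
		229, 115, 57, 29,
	};
	struct c32 ret = { .i = 39797, .q = 0 };
	int32_t angle = 0, tmp;
	int signx = 1, i;

	while (theta > (180 << 16))
		theta -= (360 << 16);
	while (theta < -(180 << 16))
		theta += (360 << 16);

	if (theta > (90 << 16)) {
		theta -= (180 << 16);
		signx = -1;
	} else if (theta < -(90 << 16)) {
		theta += (180 << 16);
		signx = -1;
	}

	for (i = 0; i <= 17; i++) {
		if (theta > angle) {
			tmp = ret.i - (ret.q >> i);
			ret.q += ret.i >> i;
			ret.i = tmp;
			angle += arctg[i];
		} else {
			tmp = ret.i + (ret.q >> i);
			ret.q -= ret.i >> i;
			ret.i = tmp;
			angle -= arctg[i];
		}
	}

	ret.i *= signx;
	ret.q *= signx;
	return ret;
}

int main(void)
{
	int deg = 30;
	struct c32 r = cordic(deg << 16);

	printf("cordic: %.5f %.5f  libm: %.5f %.5f\n",
	       r.i / 65536.0, r.q / 65536.0,
	       cos(deg * 3.14159265358979 / 180.0),
	       sin(deg * 3.14159265358979 / 180.0));
	return 0;
}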
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index 9edd4e8e0c85..bd480b481bfc 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -5,6 +5,12 @@
struct b43_wldev;
+/* Complex number using 2 32-bit signed integers */
+struct b43_c32 { s32 i, q; };
+
+#define CORDIC_CONVERT(value) (((value) >= 0) ? \
+ ((((value) >> 15) + 1) >> 1) : \
+ -((((-(value)) >> 15) + 1) >> 1))
/* PHY register routing bits */
#define B43_PHYROUTE 0x0C00 /* PHY register routing bits mask */
@@ -212,6 +218,9 @@ struct b43_phy {
bool supports_2ghz;
bool supports_5ghz;
+ /* HT info */
+ bool is_40mhz;
+
/* GMODE bit enabled? */
bool gmode;
@@ -418,5 +427,6 @@ int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset);
*/
void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on);
+struct b43_c32 b43_cordic(int theta);
#endif /* LINUX_B43_PHY_COMMON_H_ */
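CORDIC_CONVERT() above drops the 16 fractional bits of such a fixed-point value with symmetric rounding: shift right by 15, add 1, shift right by 1, negated for negative inputs, i.e. divide by 2^16 rounding half away from zero. A quick check of that behaviour on a few values:

#include <stdio.h>

/* Same shape as the CORDIC_CONVERT macro above. */
#define CORDIC_CONVERT(value) (((value) >= 0) ? \
	 ((((value) >> 15) + 1) >> 1) : \
	 -((((-(value)) >> 15) + 1) >> 1))

int main(void)
{
	int samples[] = { 65536, 98304, 32767, 32768, -32768, -98304 };
	int i;

	for (i = 0; i < 6; i++)
		printf("%7d (%+.4f) -> %d\n", samples[i],
		       samples[i] / 65536.0, CORDIC_CONVERT(samples[i]));
	return 0;
}

The output shows 1.0 -> 1, 1.5 -> 2, 0.49998 -> 0, 0.5 -> 1 and the mirrored results for the negative inputs.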
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index 3e046ec1ff86..185219e0a552 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -80,6 +80,7 @@ static void b43_lpphy_op_free(struct b43_wldev *dev)
dev->phy.lp = NULL;
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */
static void lpphy_read_band_sprom(struct b43_wldev *dev)
{
struct b43_phy_lp *lpphy = dev->phy.lp;
@@ -101,6 +102,12 @@ static void lpphy_read_band_sprom(struct b43_wldev *dev)
maxpwr = bus->sprom.maxpwr_bg;
lpphy->max_tx_pwr_med_band = maxpwr;
cckpo = bus->sprom.cck2gpo;
+ /*
+ * We don't read SPROM's opo as specs say. On rev8 SPROMs
+ * opo == ofdm2gpo and we don't know any SSB with LP-PHY
+ * and SPROM rev below 8.
+ */
+ B43_WARN_ON(bus->sprom.revision < 8);
ofdmpo = bus->sprom.ofdm2gpo;
if (cckpo) {
for (i = 0; i < 4; i++) {
@@ -1703,19 +1710,6 @@ static const struct lpphy_rx_iq_comp lpphy_rev2plus_iq_comp = {
.c0 = 0,
};
-static u8 lpphy_nbits(s32 val)
-{
- u32 tmp = abs(val);
- u8 nbits = 0;
-
- while (tmp != 0) {
- nbits++;
- tmp >>= 1;
- }
-
- return nbits;
-}
-
static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples)
{
struct lpphy_iq_est iq_est;
@@ -1742,8 +1736,8 @@ static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples)
goto out;
}
- prod_msb = lpphy_nbits(prod);
- q_msb = lpphy_nbits(qpwr);
+ prod_msb = fls(abs(prod));
+ q_msb = fls(abs(qpwr));
tmp1 = prod_msb - 20;
if (tmp1 >= 0) {
@@ -1773,47 +1767,6 @@ out:
return ret;
}
-/* Complex number using 2 32-bit signed integers */
-typedef struct {s32 i, q;} lpphy_c32;
-
-static lpphy_c32 lpphy_cordic(int theta)
-{
- u32 arctg[] = { 2949120, 1740967, 919879, 466945, 234379, 117304,
- 58666, 29335, 14668, 7334, 3667, 1833, 917, 458,
- 229, 115, 57, 29, };
- int i, tmp, signx = 1, angle = 0;
- lpphy_c32 ret = { .i = 39797, .q = 0, };
-
- theta = clamp_t(int, theta, -180, 180);
-
- if (theta > 90) {
- theta -= 180;
- signx = -1;
- } else if (theta < -90) {
- theta += 180;
- signx = -1;
- }
-
- for (i = 0; i <= 17; i++) {
- if (theta > angle) {
- tmp = ret.i - (ret.q >> i);
- ret.q += ret.i >> i;
- ret.i = tmp;
- angle += arctg[i];
- } else {
- tmp = ret.i + (ret.q >> i);
- ret.q -= ret.i >> i;
- ret.i = tmp;
- angle -= arctg[i];
- }
- }
-
- ret.i *= signx;
- ret.q *= signx;
-
- return ret;
-}
-
static void lpphy_run_samples(struct b43_wldev *dev, u16 samples, u16 loops,
u16 wait)
{
@@ -1831,8 +1784,9 @@ static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max)
{
struct b43_phy_lp *lpphy = dev->phy.lp;
u16 buf[64];
- int i, samples = 0, angle = 0, rotation = (9 * freq) / 500;
- lpphy_c32 sample;
+ int i, samples = 0, angle = 0;
+ int rotation = (((36 * freq) / 20) << 16) / 100;
+ struct b43_c32 sample;
lpphy->tx_tone_freq = freq;
@@ -1848,10 +1802,10 @@ static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max)
}
for (i = 0; i < samples; i++) {
- sample = lpphy_cordic(angle);
+ sample = b43_cordic(angle);
angle += rotation;
- buf[i] = ((sample.i * max) & 0xFF) << 8;
- buf[i] |= (sample.q * max) & 0xFF;
+ buf[i] = CORDIC_CONVERT((sample.i * max) & 0xFF) << 8;
+ buf[i] |= CORDIC_CONVERT((sample.q * max) & 0xFF);
}
b43_lptab_write_bulk(dev, B43_LPTAB16(5, 0), samples, buf);
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 992318a78077..795bb1e3345d 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -28,7 +28,50 @@
#include "b43.h"
#include "phy_n.h"
#include "tables_nphy.h"
+#include "main.h"
+struct nphy_txgains {
+ u16 txgm[2];
+ u16 pga[2];
+ u16 pad[2];
+ u16 ipa[2];
+};
+
+struct nphy_iqcal_params {
+ u16 txgm;
+ u16 pga;
+ u16 pad;
+ u16 ipa;
+ u16 cal_gain;
+ u16 ncorr[5];
+};
+
+struct nphy_iq_est {
+ s32 iq0_prod;
+ u32 i0_pwr;
+ u32 q0_pwr;
+ s32 iq1_prod;
+ u32 i1_pwr;
+ u32 q1_pwr;
+};
+
+enum b43_nphy_rf_sequence {
+ B43_RFSEQ_RX2TX,
+ B43_RFSEQ_TX2RX,
+ B43_RFSEQ_RESET2RX,
+ B43_RFSEQ_UPDATE_GAINH,
+ B43_RFSEQ_UPDATE_GAINL,
+ B43_RFSEQ_UPDATE_GAINU,
+};
+
+static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
+ u8 *events, u8 *delays, u8 length);
+static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
+ enum b43_nphy_rf_sequence seq);
+static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
+ u16 value, u8 core, bool off);
+static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
+ u16 value, u8 core);
void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
{//TODO
@@ -197,173 +240,1020 @@ void b43_nphy_radio_turn_off(struct b43_wldev *dev)
~B43_NPHY_RFCTL_CMD_EN);
}
-#define ntab_upload(dev, offset, data) do { \
- unsigned int i; \
- for (i = 0; i < (offset##_SIZE); i++) \
- b43_ntab_write(dev, (offset) + i, (data)[i]); \
- } while (0)
-
-/* Upload the N-PHY tables. */
+/*
+ * Upload the N-PHY tables.
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables
+ */
static void b43_nphy_tables_init(struct b43_wldev *dev)
{
- /* Static tables */
- ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct);
- ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup);
- ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap);
- ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
- ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
- ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
- ntab_upload(dev, B43_NTAB_PILOTLT, b43_ntab_pilotlt);
- ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
- ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
- ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
- ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
- ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
- ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
- ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
-
- /* Volatile tables */
- ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
- ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
- ntab_upload(dev, B43_NTAB_C0_ESTPLT, b43_ntab_estimatepowerlt0);
- ntab_upload(dev, B43_NTAB_C1_ESTPLT, b43_ntab_estimatepowerlt1);
- ntab_upload(dev, B43_NTAB_C0_ADJPLT, b43_ntab_adjustpower0);
- ntab_upload(dev, B43_NTAB_C1_ADJPLT, b43_ntab_adjustpower1);
- ntab_upload(dev, B43_NTAB_C0_GAINCTL, b43_ntab_gainctl0);
- ntab_upload(dev, B43_NTAB_C1_GAINCTL, b43_ntab_gainctl1);
- ntab_upload(dev, B43_NTAB_C0_IQLT, b43_ntab_iqlt0);
- ntab_upload(dev, B43_NTAB_C1_IQLT, b43_ntab_iqlt1);
- ntab_upload(dev, B43_NTAB_C0_LOFEEDTH, b43_ntab_loftlt0);
- ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
+ if (dev->phy.rev < 3)
+ b43_nphy_rev0_1_2_tables_init(dev);
+ else
+ b43_nphy_rev3plus_tables_init(dev);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
+static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ enum ieee80211_band band;
+ u16 tmp;
+
+ if (!enable) {
+ nphy->rfctrl_intc1_save = b43_phy_read(dev,
+ B43_NPHY_RFCTL_INTC1);
+ nphy->rfctrl_intc2_save = b43_phy_read(dev,
+ B43_NPHY_RFCTL_INTC2);
+ band = b43_current_band(dev->wl);
+ if (dev->phy.rev >= 3) {
+ if (band == IEEE80211_BAND_5GHZ)
+ tmp = 0x600;
+ else
+ tmp = 0x480;
+ } else {
+ if (band == IEEE80211_BAND_5GHZ)
+ tmp = 0x180;
+ else
+ tmp = 0x120;
+ }
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
+ } else {
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1,
+ nphy->rfctrl_intc1_save);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2,
+ nphy->rfctrl_intc2_save);
+ }
}
-static void b43_nphy_workarounds(struct b43_wldev *dev)
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */
+static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u16 tmp;
+ enum ieee80211_band band = b43_current_band(dev->wl);
+ bool ipa = (nphy->ipa2g_on && band == IEEE80211_BAND_2GHZ) ||
+ (nphy->ipa5g_on && band == IEEE80211_BAND_5GHZ);
+
+ if (dev->phy.rev >= 3) {
+ if (ipa) {
+ tmp = 4;
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2,
+ (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
+ }
+
+ tmp = 1;
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2,
+ (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BmacPhyClkFgc */
+static void b43_nphy_bmac_clock_fgc(struct b43_wldev *dev, bool force)
+{
+ u32 tmslow;
+
+ if (dev->phy.type != B43_PHYTYPE_N)
+ return;
+
+ tmslow = ssb_read32(dev->dev, SSB_TMSLOW);
+ if (force)
+ tmslow |= SSB_TMSLOW_FGC;
+ else
+ tmslow &= ~SSB_TMSLOW_FGC;
+ ssb_write32(dev->dev, SSB_TMSLOW, tmslow);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
+static void b43_nphy_reset_cca(struct b43_wldev *dev)
+{
+ u16 bbcfg;
+
+ b43_nphy_bmac_clock_fgc(dev, 1);
+ bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG);
+ b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA);
+ udelay(1);
+ b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA);
+ b43_nphy_bmac_clock_fgc(dev, 0);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */
+static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble)
+{
+ u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG);
+
+ mimocfg |= B43_NPHY_MIMOCFG_AUTO;
+ if (preamble == 1)
+ mimocfg |= B43_NPHY_MIMOCFG_GFMIX;
+ else
+ mimocfg &= ~B43_NPHY_MIMOCFG_GFMIX;
+
+ b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */
+static void b43_nphy_update_txrx_chain(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ bool override = false;
+ u16 chain = 0x33;
+
+ if (nphy->txrx_chain == 0) {
+ chain = 0x11;
+ override = true;
+ } else if (nphy->txrx_chain == 1) {
+ chain = 0x22;
+ override = true;
+ }
+
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
+ ~(B43_NPHY_RFSEQCA_TXEN | B43_NPHY_RFSEQCA_RXEN),
+ chain);
+
+ if (override)
+ b43_phy_set(dev, B43_NPHY_RFSEQMODE,
+ B43_NPHY_RFSEQMODE_CAOVER);
+ else
+ b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
+ ~B43_NPHY_RFSEQMODE_CAOVER);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
+static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
+ u16 samps, u8 time, bool wait)
+{
+ int i;
+ u16 tmp;
+
+ b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps);
+ b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time);
+ if (wait)
+ b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE);
+ else
+ b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE);
+
+ b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START);
+
+ for (i = 1000; i; i--) {
+ tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD);
+ if (!(tmp & B43_NPHY_IQEST_CMD_START)) {
+ est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0);
+ est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0);
+ est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0);
+
+ est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1);
+ est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1);
+ est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1);
+ return;
+ }
+ udelay(10);
+ }
+ memset(est, 0, sizeof(*est));
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
+static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
+ struct b43_phy_n_iq_comp *pcomp)
+{
+ if (write) {
+ b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0);
+ b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0);
+ b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1);
+ b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1);
+ } else {
+ pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0);
+ pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0);
+ pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1);
+ pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
+static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
+{
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+
+ b43_phy_write(dev, B43_NPHY_RFSEQCA, regs[0]);
+ if (core == 0) {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
+ } else {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
+ }
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[3]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[4]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, regs[5]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, regs[6]);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, regs[7]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_OVER, regs[8]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */
+static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
+{
+ u8 rxval, txval;
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+
+ regs[0] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
+ if (core == 0) {
+ regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
+ regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
+ } else {
+ regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
+ regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ }
+ regs[3] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+ regs[4] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+ regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1);
+ regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2);
+ regs[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S1);
+ regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER);
+ regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
+ regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
+
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
+
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, (u16)~B43_NPHY_RFSEQCA_RXDIS,
+ ((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
+ ((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN,
+ (core << B43_NPHY_RFSEQCA_RXEN_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXDIS,
+ (core << B43_NPHY_RFSEQCA_TXDIS_SHIFT));
+
+ if (core == 0) {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x0007);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0007);
+ } else {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x0007);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007);
+ }
+
+ b43_nphy_rf_control_intc_override(dev, 2, 0, 3);
+ b43_nphy_rf_control_override(dev, 8, 0, 3, false);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
+
+ if (core == 0) {
+ rxval = 1;
+ txval = 8;
+ } else {
+ rxval = 4;
+ txval = 2;
+ }
+ b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1));
+ b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core));
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
+static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
+{
+ int i;
+ s32 iq;
+ u32 ii;
+ u32 qq;
+ int iq_nbits, qq_nbits;
+ int arsh, brsh;
+ u16 tmp, a, b;
+
+ struct nphy_iq_est est;
+ struct b43_phy_n_iq_comp old;
+ struct b43_phy_n_iq_comp new = { };
+ bool error = false;
+
+ if (mask == 0)
+ return;
+
+ b43_nphy_rx_iq_coeffs(dev, false, &old);
+ b43_nphy_rx_iq_coeffs(dev, true, &new);
+ b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false);
+ new = old;
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0 && (mask & 1)) {
+ iq = est.iq0_prod;
+ ii = est.i0_pwr;
+ qq = est.q0_pwr;
+ } else if (i == 1 && (mask & 2)) {
+ iq = est.iq1_prod;
+ ii = est.i1_pwr;
+ qq = est.q1_pwr;
+ } else {
+ B43_WARN_ON(1);
+ continue;
+ }
+
+ if (ii + qq < 2) {
+ error = true;
+ break;
+ }
+
+ iq_nbits = fls(abs(iq));
+ qq_nbits = fls(qq);
+
+ arsh = iq_nbits - 20;
+ if (arsh >= 0) {
+ a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
+ tmp = ii >> arsh;
+ } else {
+ a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
+ tmp = ii << -arsh;
+ }
+ if (tmp == 0) {
+ error = true;
+ break;
+ }
+ a /= tmp;
+
+ brsh = qq_nbits - 11;
+ if (brsh >= 0) {
+ b = (qq << (31 - qq_nbits));
+ tmp = ii >> brsh;
+ } else {
+ b = (qq << (31 - qq_nbits));
+ tmp = ii << -brsh;
+ }
+ if (tmp == 0) {
+ error = true;
+ break;
+ }
+ b = int_sqrt(b / tmp - a * a) - (1 << 10);
+
+ if (i == 0 && (mask & 0x1)) {
+ if (dev->phy.rev >= 3) {
+ new.a0 = a & 0x3FF;
+ new.b0 = b & 0x3FF;
+ } else {
+ new.a0 = b & 0x3FF;
+ new.b0 = a & 0x3FF;
+ }
+ } else if (i == 1 && (mask & 0x2)) {
+ if (dev->phy.rev >= 3) {
+ new.a1 = a & 0x3FF;
+ new.b1 = b & 0x3FF;
+ } else {
+ new.a1 = b & 0x3FF;
+ new.b1 = a & 0x3FF;
+ }
+ }
+ }
+
+ if (error)
+ new = old;
+
+ b43_nphy_rx_iq_coeffs(dev, true, &new);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
+static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
+{
+ u16 array[4];
+ int i;
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x3C50);
+ for (i = 0; i < 4; i++)
+ array[i] = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
+static void b43_nphy_write_clip_detection(struct b43_wldev *dev, u16 *clip_st)
+{
+ b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]);
+ b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
+static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
+{
+ clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES);
+ clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
+static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
+{
+ u16 tmp;
+
+ if (dev->dev->id.revision == 16)
+ b43_mac_suspend(dev);
+
+ tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL);
+ tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN |
+ B43_NPHY_CLASSCTL_WAITEDEN);
+ tmp &= ~mask;
+ tmp |= (val & mask);
+ b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp);
+
+ if (dev->dev->id.revision == 16)
+ b43_mac_enable(dev);
+
+ return tmp;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */
+static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
{
struct b43_phy *phy = &dev->phy;
- unsigned int i;
+ struct b43_phy_n *nphy = phy->n;
- b43_phy_set(dev, B43_NPHY_IQFLIP,
- B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
- if (1 /* FIXME band is 2.4GHz */) {
- b43_phy_set(dev, B43_NPHY_CLASSCTL,
- B43_NPHY_CLASSCTL_CCKEN);
- } else {
- b43_phy_mask(dev, B43_NPHY_CLASSCTL,
- ~B43_NPHY_CLASSCTL_CCKEN);
- }
- b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
- b43_phy_write(dev, B43_NPHY_TXFRAMEDELAY, 8);
-
- /* Fixup some tables */
- b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0xA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0xA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0);
- b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0);
- b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB);
- b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB);
- b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x800);
- b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x800);
-
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
-
- //TODO set RF sequence
-
- /* Set narrowband clip threshold */
- b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 66);
- b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 66);
-
- /* Set wideband clip 2 threshold */
- b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
- ~B43_NPHY_C1_CLIPWBTHRES_CLIP2,
- 21 << B43_NPHY_C1_CLIPWBTHRES_CLIP2_SHIFT);
- b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
- ~B43_NPHY_C2_CLIPWBTHRES_CLIP2,
- 21 << B43_NPHY_C2_CLIPWBTHRES_CLIP2_SHIFT);
-
- /* Set Clip 2 detect */
- b43_phy_set(dev, B43_NPHY_C1_CGAINI,
- B43_NPHY_C1_CGAINI_CL2DETECT);
- b43_phy_set(dev, B43_NPHY_C2_CGAINI,
- B43_NPHY_C2_CGAINI_CL2DETECT);
-
- if (0 /*FIXME*/) {
- /* Set dwell lengths */
- b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 43);
- b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 43);
- b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 9);
- b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 9);
-
- /* Set gain backoff */
- b43_phy_maskset(dev, B43_NPHY_C1_CGAINI,
- ~B43_NPHY_C1_CGAINI_GAINBKOFF,
- 1 << B43_NPHY_C1_CGAINI_GAINBKOFF_SHIFT);
- b43_phy_maskset(dev, B43_NPHY_C2_CGAINI,
- ~B43_NPHY_C2_CGAINI_GAINBKOFF,
- 1 << B43_NPHY_C2_CGAINI_GAINBKOFF_SHIFT);
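+ /* deaf_count is a nesting counter: classifier and clip-detection state is
+ * saved and overridden only on the first enable, and restored only when
+ * the matching final disable brings the count back to zero. */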
+ if (enable) {
+ u16 clip[] = { 0xFFFF, 0xFFFF };
+ if (nphy->deaf_count++ == 0) {
+ nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
+ b43_nphy_classifier(dev, 0x7, 0);
+ b43_nphy_read_clip_detection(dev, nphy->clip_state);
+ b43_nphy_write_clip_detection(dev, clip);
+ }
+ b43_nphy_reset_cca(dev);
+ } else {
+ if (--nphy->deaf_count == 0) {
+ b43_nphy_classifier(dev, 0x7, nphy->classifier_state);
+ b43_nphy_write_clip_detection(dev, nphy->clip_state);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */
+static void b43_nphy_stop_playback(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u16 tmp;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ tmp = b43_phy_read(dev, B43_NPHY_SAMP_STAT);
+ if (tmp & 0x1)
+ b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP);
+ else if (tmp & 0x2)
+ b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, (u16)~0x8000);
+
+ b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004);
+
+ if (nphy->bb_mult_save & 0x80000000) {
+ tmp = nphy->bb_mult_save & 0xFFFF;
+ b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
+ nphy->bb_mult_save = 0;
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */
+static void b43_nphy_spur_workaround(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ unsigned int channel;
+ int tone[2] = { 57, 58 };
+ u32 noise[2] = { 0x3FF, 0x3FF };
+
+ B43_WARN_ON(dev->phy.rev < 3);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ /* FIXME: channel = radio_chanspec */
+
+ if (nphy->gband_spurwar_en) {
+ /* TODO: N PHY Adjust Analog Pfbw (7) */
+ if (channel == 11 && dev->phy.is_40mhz)
+ ; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/
+ else
+ ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
+ /* TODO: N PHY Adjust CRS Min Power (0x1E) */
+ }
+
+ if (nphy->aband_spurwar_en) {
+ if (channel == 54) {
+ tone[0] = 0x20;
+ noise[0] = 0x25F;
+ } else if (channel == 38 || channel == 102 || channel == 118) {
+ if (0 /* FIXME */) {
+ tone[0] = 0x20;
+ noise[0] = 0x21F;
+ } else {
+ tone[0] = 0;
+ noise[0] = 0;
+ }
+ } else if (channel == 134) {
+ tone[0] = 0x20;
+ noise[0] = 0x21F;
+ } else if (channel == 151) {
+ tone[0] = 0x10;
+ noise[0] = 0x23F;
+ } else if (channel == 153 || channel == 161) {
+ tone[0] = 0x30;
+ noise[0] = 0x23F;
+ } else {
+ tone[0] = 0;
+ noise[0] = 0;
+ }
+
+ if (!tone[0] && !noise[0])
+ ; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/
+ else
+ ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
+static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i, j;
+ u8 code;
+
+ /* TODO: for PHY >= 3
+ s8 *lna1_gain, *lna2_gain;
+ u8 *gain_db, *gain_bits;
+ u16 *rfseq_init;
+ u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 };
+ u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 };
+ */
+
+ u8 rfseq_events[3] = { 6, 8, 7 };
+ u8 rfseq_delays[3] = { 10, 30, 1 };
+
+ if (dev->phy.rev >= 3) {
+ /* TODO */
+ } else {
+ /* Set Clip 2 detect */
+ b43_phy_set(dev, B43_NPHY_C1_CGAINI,
+ B43_NPHY_C1_CGAINI_CL2DETECT);
+ b43_phy_set(dev, B43_NPHY_C2_CGAINI,
+ B43_NPHY_C2_CGAINI_CL2DETECT);
+
+ /* Set narrowband clip threshold */
+ b43_phy_set(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84);
+ b43_phy_set(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84);
+
+ if (!dev->phy.is_40mhz) {
+ /* Set dwell lengths */
+ b43_phy_set(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B);
+ b43_phy_set(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B);
+ b43_phy_set(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009);
+ b43_phy_set(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009);
+ }
+
+ /* Set wideband clip 2 threshold */
+ b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
+ ~B43_NPHY_C1_CLIPWBTHRES_CLIP2,
+ 21);
+ b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
+ ~B43_NPHY_C2_CLIPWBTHRES_CLIP2,
+ 21);
+
+ if (!dev->phy.is_40mhz) {
+ b43_phy_maskset(dev, B43_NPHY_C1_CGAINI,
+ ~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C2_CGAINI,
+ ~B43_NPHY_C2_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C1_CCK_CGAINI,
+ ~B43_NPHY_C1_CCK_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C2_CCK_CGAINI,
+ ~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1);
+ }
+
+ b43_phy_set(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
+
+ if (nphy->gain_boost) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
+ dev->phy.is_40mhz)
+ code = 4;
+ else
+ code = 5;
+ } else {
+ code = dev->phy.is_40mhz ? 6 : 7;
+ }
/* Set HPVGA2 index */
b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN,
~B43_NPHY_C1_INITGAIN_HPVGA2,
- 6 << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT);
+ code << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT);
b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN,
~B43_NPHY_C2_INITGAIN_HPVGA2,
- 6 << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
+ code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x7C));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x7C));
+
+ /* TODO: b43_nphy_adjust_lna_gain_table(dev); */
+
+ if (nphy->elna_gain_config) {
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0C08);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x74));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x74));
+ }
- //FIXME verify that the specs really mean to use autoinc here.
- for (i = 0; i < 3; i++)
- b43_ntab_write(dev, B43_NTAB16(7, 0x106) + i, 0x673);
+ if (dev->phy.rev == 2) {
+ for (i = 0; i < 4; i++) {
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (0x0400 * i) + 0x0020);
+ for (j = 0; j < 21; j++)
+ b43_phy_write(dev,
+ B43_NPHY_TABLE_DATALO, 3 * j);
+ }
+
+ b43_nphy_set_rf_sequence(dev, 5,
+ rfseq_events, rfseq_delays, 3);
+ b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
+ (u16)~B43_NPHY_OVER_DGAIN_CCKDGECV,
+ 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ b43_phy_maskset(dev, B43_PHY_N(0xC5D),
+ 0xFF80, 4);
+ }
}
+}
- /* Set minimum gain value */
- b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN,
- ~B43_NPHY_C1_MINGAIN,
- 23 << B43_NPHY_C1_MINGAIN_SHIFT);
- b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN,
- ~B43_NPHY_C2_MINGAIN,
- 23 << B43_NPHY_C2_MINGAIN_SHIFT);
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
+static void b43_nphy_workarounds(struct b43_wldev *dev)
+{
+ struct ssb_bus *bus = dev->dev->bus;
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = phy->n;
- if (phy->rev < 2) {
- b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
- ~B43_NPHY_SCRAM_SIGCTL_SCM);
+ u8 events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 };
+ u8 delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 };
+
+ u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
+ u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ b43_nphy_classifier(dev, 1, 0);
+ else
+ b43_nphy_classifier(dev, 1, 1);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ b43_phy_set(dev, B43_NPHY_IQFLIP,
+ B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
+
+ if (dev->phy.rev >= 3) {
+ /* TODO */
+ } else {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
+ nphy->band5g_pwrgain) {
+ b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
+ b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
+ } else {
+ b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
+ b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8);
+ }
+
+ /* TODO: convert to b43_ntab_write? */
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2000);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x000A);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2010);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x000A);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2002);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0xCDAA);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2012);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0xCDAA);
+
+ if (dev->phy.rev < 2) {
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2008);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0000);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2018);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0000);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2007);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x7AAB);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2017);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x7AAB);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2006);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0800);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2016);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0800);
+ }
+
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
+
+ if (bus->sprom.boardflags2_lo & 0x100 &&
+ bus->boardinfo.type == 0x8B) {
+ delays1[0] = 0x1;
+ delays1[5] = 0x14;
+ }
+ b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
+ b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
+
+ b43_nphy_gain_ctrl_workarounds(dev);
+
+ if (dev->phy.rev < 2) {
+ if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2)
+ ; /*TODO: b43_mhf(dev, 2, 0x0010, 0x0010, 3);*/
+ } else if (dev->phy.rev == 2) {
+ b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0);
+ b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0);
+ }
+
+ if (dev->phy.rev < 2)
+ b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
+ ~B43_NPHY_SCRAM_SIGCTL_SCM);
+
+ /* Set phase track alpha and beta */
+ b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
+
+ b43_phy_mask(dev, B43_NPHY_PIL_DW1,
+ (u16)~B43_NPHY_PIL_DW_64QAM);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
+
+ if (dev->phy.rev == 2)
+ b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
+ B43_NPHY_FINERX2_CGC_DECGC);
}
- /* Set phase track alpha and beta */
- b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
- b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
- b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
- b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
- b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
- b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
}
-static void b43_nphy_reset_cca(struct b43_wldev *dev)
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */
+static int b43_nphy_load_samples(struct b43_wldev *dev,
+ struct b43_c32 *samples, u16 len) {
+ struct b43_phy_n *nphy = dev->phy.n;
+ u16 i;
+ u32 *data;
+
+ data = kzalloc(len * sizeof(u32), GFP_KERNEL);
+ if (!data) {
+ b43err(dev->wl, "allocation for samples loading failed\n");
+ return -ENOMEM;
+ }
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
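+ /* Pack each complex sample into one 32-bit table word:
+ * I in bits 19:10, Q in bits 9:0. */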
+ for (i = 0; i < len; i++) {
+ data[i] = (samples[i].i & 0x3FF) << 10;
+ data[i] |= samples[i].q & 0x3FF;
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB32(17, 0), len, data);
+
+ kfree(data);
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+ return 0;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GenLoadSamples */
+static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
+ bool test)
{
- u16 bbcfg;
+ int i;
+ u16 bw, len, rot, angle;
+ struct b43_c32 *samples;
- ssb_write32(dev->dev, SSB_TMSLOW,
- ssb_read32(dev->dev, SSB_TMSLOW) | SSB_TMSLOW_FGC);
- bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG);
- b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTCCA);
- b43_phy_write(dev, B43_NPHY_BBCFG,
- bbcfg & ~B43_NPHY_BBCFG_RSTCCA);
- ssb_write32(dev->dev, SSB_TMSLOW,
- ssb_read32(dev->dev, SSB_TMSLOW) & ~SSB_TMSLOW_FGC);
+
+ bw = (dev->phy.is_40mhz) ? 40 : 20;
+ len = bw << 3;
+
+ if (test) {
+ if (b43_phy_read(dev, B43_NPHY_BBCFG) & B43_NPHY_BBCFG_RSTRX)
+ bw = 82;
+ else
+ bw = 80;
+
+ if (dev->phy.is_40mhz)
+ bw <<= 1;
+
+ len = bw << 1;
+ }
+
+ samples = kzalloc(len * sizeof(struct b43_c32), GFP_KERNEL);
+ if (!samples) {
+ b43err(dev->wl, "allocation for samples generation failed\n");
+ return 0;
+ }
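+ /* rot is presumably the per-sample phase increment of the requested tone,
+ * in the fixed-point angle units consumed by b43_cordic(); angle
+ * accumulates it across the sample buffer. */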
+ rot = (((freq * 36) / bw) << 16) / 100;
+ angle = 0;
+
+ for (i = 0; i < len; i++) {
+ samples[i] = b43_cordic(angle);
+ angle += rot;
+ samples[i].q = CORDIC_CONVERT(samples[i].q * max);
+ samples[i].i = CORDIC_CONVERT(samples[i].i * max);
+ }
+
+ i = b43_nphy_load_samples(dev, samples, len);
+ kfree(samples);
+ return (i < 0) ? 0 : len;
}
-enum b43_nphy_rf_sequence {
- B43_RFSEQ_RX2TX,
- B43_RFSEQ_TX2RX,
- B43_RFSEQ_RESET2RX,
- B43_RFSEQ_UPDATE_GAINH,
- B43_RFSEQ_UPDATE_GAINL,
- B43_RFSEQ_UPDATE_GAINU,
-};
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RunSamples */
+static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
+ u16 wait, bool iqmode, bool dac_test)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i;
+ u16 seq_mode;
+ u32 tmp;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+
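+ /* Bit 31 of bb_mult_save flags that the original BB multiplier from
+ * table 15, offset 87 has already been saved; the low 16 bits hold the
+ * saved value (restored in b43_nphy_stop_playback). */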
+ if ((nphy->bb_mult_save & 0x80000000) == 0) {
+ tmp = b43_ntab_read(dev, B43_NTAB16(15, 87));
+ nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000;
+ }
+
+ if (!dev->phy.is_40mhz)
+ tmp = 0x6464;
+ else
+ tmp = 0x4747;
+ b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+
+ b43_phy_write(dev, B43_NPHY_SAMP_DEPCNT, (samps - 1));
+
+ if (loops != 0xFFFF)
+ b43_phy_write(dev, B43_NPHY_SAMP_LOOPCNT, (loops - 1));
+ else
+ b43_phy_write(dev, B43_NPHY_SAMP_LOOPCNT, loops);
+
+ b43_phy_write(dev, B43_NPHY_SAMP_WAITCNT, wait);
+
+ seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE);
+
+ b43_phy_set(dev, B43_NPHY_RFSEQMODE, B43_NPHY_RFSEQMODE_CAOVER);
+ if (iqmode) {
+ b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF);
+ b43_phy_set(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8000);
+ } else {
+ if (dac_test)
+ b43_phy_write(dev, B43_NPHY_SAMP_CMD, 5);
+ else
+ b43_phy_write(dev, B43_NPHY_SAMP_CMD, 1);
+ }
+ for (i = 0; i < 100; i++) {
+ if (b43_phy_read(dev, B43_NPHY_RFSEQST) & 1) {
+ i = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (i)
+ b43err(dev->wl, "run samples timeout\n");
+
+ b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
+}
+
+/*
+ * Transmits a known value for LO calibration
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone
+ */
+static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val,
+ bool iqmode, bool dac_test)
+{
+ u16 samp = b43_nphy_gen_load_samples(dev, freq, max_val, dac_test);
+ if (samp == 0)
+ return -1;
+ b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test);
+ return 0;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
+static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i, j;
+ u32 tmp;
+ u32 cur_real, cur_imag, real_part, imag_part;
+
+ u16 buffer[7];
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
+
+ for (i = 0; i < 2; i++) {
+ tmp = ((buffer[i * 2] & 0x3FF) << 10) |
+ (buffer[i * 2 + 1] & 0x3FF);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (((i + 26) << 10) | 320));
+ for (j = 0; j < 128; j++) {
+ b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
+ ((tmp >> 16) & 0xFFFF));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (tmp & 0xFFFF));
+ }
+ }
+
+ for (i = 0; i < 2; i++) {
+ tmp = buffer[5 + i];
+ real_part = (tmp >> 8) & 0xFF;
+ imag_part = (tmp & 0xFF);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (((i + 26) << 10) | 448));
+
+ if (dev->phy.rev >= 3) {
+ cur_real = real_part;
+ cur_imag = imag_part;
+ tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF);
+ }
+
+ for (j = 0; j < 128; j++) {
+ if (dev->phy.rev < 3) {
+ cur_real = (real_part * loscale[j] + 128) >> 8;
+ cur_imag = (imag_part * loscale[j] + 128) >> 8;
+ tmp = ((cur_real & 0xFF) << 8) |
+ (cur_imag & 0xFF);
+ }
+ b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
+ ((tmp >> 16) & 0xFFFF));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (tmp & 0xFFFF));
+ }
+ }
+
+ if (dev->phy.rev >= 3) {
+ b43_shm_write16(dev, B43_SHM_SHARED,
+ B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF);
+ b43_shm_write16(dev, B43_SHM_SHARED,
+ B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF);
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */
+static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
+ u8 *events, u8 *delays, u8 length)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i;
+ u8 end = (dev->phy.rev >= 3) ? 0x1F : 0x0F;
+ u16 offset1 = cmd << 4;
+ u16 offset2 = offset1 + 0x80;
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+
+ b43_ntab_write_bulk(dev, B43_NTAB8(7, offset1), length, events);
+ b43_ntab_write_bulk(dev, B43_NTAB8(7, offset2), length, delays);
+
+ for (i = length; i < 16; i++) {
+ b43_ntab_write(dev, B43_NTAB8(7, offset1 + i), end);
+ b43_ntab_write(dev, B43_NTAB8(7, offset2 + i), 1);
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */
static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
enum b43_nphy_rf_sequence seq)
{
@@ -376,6 +1266,7 @@ static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
[B43_RFSEQ_UPDATE_GAINU] = B43_NPHY_RFSEQTR_UPGU,
};
int i;
+ u16 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE);
B43_WARN_ON(seq >= ARRAY_SIZE(trigger));
@@ -389,8 +1280,181 @@ static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
}
b43err(dev->wl, "RF sequence status timeout\n");
ok:
- b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
- ~(B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER));
+ b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
+static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
+ u16 value, u8 core, bool off)
+{
+ int i;
+ u8 index = fls(field);
+ u8 addr, en_addr, val_addr;
+ /* we expect only one bit set */
+ B43_WARN_ON(field & (~(1 << (index - 1))));
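+ /* fls() yields the 1-based position of that single bit; it is used
+ * (minus 1 for rev3+, minus 2 for rev2) as the row index into the
+ * RF control override tables. */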
+
+ if (dev->phy.rev >= 3) {
+ const struct nphy_rf_control_override_rev3 *rf_ctrl;
+ for (i = 0; i < 2; i++) {
+ if (index == 0 || index == 16) {
+ b43err(dev->wl,
+ "Unsupported RF Ctrl Override call\n");
+ return;
+ }
+
+ rf_ctrl = &tbl_rf_control_override_rev3[index - 1];
+ en_addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->en_addr0 : rf_ctrl->en_addr1);
+ val_addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->val_addr0 : rf_ctrl->val_addr1);
+
+ if (off) {
+ b43_phy_mask(dev, en_addr, ~(field));
+ b43_phy_mask(dev, val_addr,
+ ~(rf_ctrl->val_mask));
+ } else {
+ if (core == 0 || ((1 << core) & i) != 0) {
+ b43_phy_set(dev, en_addr, field);
+ b43_phy_maskset(dev, val_addr,
+ ~(rf_ctrl->val_mask),
+ (value << rf_ctrl->val_shift));
+ }
+ }
+ }
+ } else {
+ const struct nphy_rf_control_override_rev2 *rf_ctrl;
+ if (off) {
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(field));
+ value = 0;
+ } else {
+ b43_phy_set(dev, B43_NPHY_RFCTL_OVER, field);
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (index <= 1 || index == 16) {
+ b43err(dev->wl,
+ "Unsupported RF Ctrl Override call\n");
+ return;
+ }
+
+ if (index == 2 || index == 10 ||
+ (index >= 13 && index <= 15)) {
+ core = 1;
+ }
+
+ rf_ctrl = &tbl_rf_control_override_rev2[index - 2];
+ addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->addr0 : rf_ctrl->addr1);
+
+ if ((core & (1 << i)) != 0)
+ b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask),
+ (value << rf_ctrl->shift));
+
+ b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_START);
+ udelay(1);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
+static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
+ u16 value, u8 core)
+{
+ u8 i, j;
+ u16 reg, tmp, val;
+
+ B43_WARN_ON(dev->phy.rev < 3);
+ B43_WARN_ON(field > 4);
+
+ for (i = 0; i < 2; i++) {
+ if ((core == 1 && i == 1) || (core == 2 && !i))
+ continue;
+
+ reg = (i == 0) ?
+ B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
+ b43_phy_mask(dev, reg, 0xFBFF);
+
+ switch (field) {
+ case 0:
+ b43_phy_write(dev, reg, 0);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ break;
+ case 1:
+ if (!i) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1,
+ 0xFC3F, (value << 6));
+ b43_phy_maskset(dev, B43_NPHY_TXF_40CO_B1S1,
+ 0xFFFE, 1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_START);
+ for (j = 0; j < 100; j++) {
+ if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START) {
+ j = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (j)
+ b43err(dev->wl,
+ "intc override timeout\n");
+ b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1,
+ 0xFFFE);
+ } else {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC2,
+ 0xFC3F, (value << 6));
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
+ 0xFFFE, 1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_RXTX);
+ for (j = 0; j < 100; j++) {
+ if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX) {
+ j = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (j)
+ b43err(dev->wl,
+ "intc override timeout\n");
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
+ 0xFFFE);
+ }
+ break;
+ case 2:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0020;
+ val = value << 5;
+ } else {
+ tmp = 0x0010;
+ val = value << 4;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ case 3:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0001;
+ val = value;
+ } else {
+ tmp = 0x0004;
+ val = value << 2;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ case 4:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0002;
+ val = value << 1;
+ } else {
+ tmp = 0x0008;
+ val = value << 3;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ }
+ }
}
static void b43_nphy_bphy_init(struct b43_wldev *dev)
@@ -411,81 +1475,1680 @@ static void b43_nphy_bphy_init(struct b43_wldev *dev)
b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
}
-/* RSSI Calibration */
-static void b43_nphy_rssi_cal(struct b43_wldev *dev, u8 type)
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */
+static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
+ s8 offset, u8 core, u8 rail, u8 type)
{
- //TODO
+ u16 tmp;
+ bool core1or5 = (core == 1) || (core == 5);
+ bool core2or5 = (core == 2) || (core == 5);
+
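+ /* Pack scale into bits 13:8 and the clamped offset as a signed
+ * 6-bit value into bits 5:0 of the RSSI multiplier register value. */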
+ offset = clamp_val(offset, -32, 31);
+ tmp = ((scale & 0x3F) << 8) | (offset & 0x3F);
+
+ if (core1or5 && (rail == 0) && (type == 2))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, tmp);
+ if (core1or5 && (rail == 1) && (type == 2))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, tmp);
+ if (core2or5 && (rail == 0) && (type == 2))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, tmp);
+ if (core2or5 && (rail == 1) && (type == 2))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, tmp);
+ if (core1or5 && (rail == 0) && (type == 0))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, tmp);
+ if (core1or5 && (rail == 1) && (type == 0))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, tmp);
+ if (core2or5 && (rail == 0) && (type == 0))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, tmp);
+ if (core2or5 && (rail == 1) && (type == 0))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, tmp);
+ if (core1or5 && (rail == 0) && (type == 1))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, tmp);
+ if (core1or5 && (rail == 1) && (type == 1))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, tmp);
+ if (core2or5 && (rail == 0) && (type == 1))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, tmp);
+ if (core2or5 && (rail == 1) && (type == 1))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, tmp);
+ if (core1or5 && (rail == 0) && (type == 6))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TBD, tmp);
+ if (core1or5 && (rail == 1) && (type == 6))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TBD, tmp);
+ if (core2or5 && (rail == 0) && (type == 6))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TBD, tmp);
+ if (core2or5 && (rail == 1) && (type == 6))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TBD, tmp);
+ if (core1or5 && (rail == 0) && (type == 3))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_PWRDET, tmp);
+ if (core1or5 && (rail == 1) && (type == 3))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_PWRDET, tmp);
+ if (core2or5 && (rail == 0) && (type == 3))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_PWRDET, tmp);
+ if (core2or5 && (rail == 1) && (type == 3))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_PWRDET, tmp);
+ if (core1or5 && (type == 4))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TSSI, tmp);
+ if (core2or5 && (type == 4))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TSSI, tmp);
+ if (core1or5 && (type == 5))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TSSI, tmp);
+ if (core2or5 && (type == 5))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp);
+}
+
+static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
+{
+ u16 val;
+
+ if (type < 3)
+ val = 0;
+ else if (type == 6)
+ val = 1;
+ else if (type == 3)
+ val = 2;
+ else
+ val = 3;
+
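+ /* Replicate the 2-bit RSSI source code into bits 13:12 and 15:14
+ * of the AFE control words for both cores. */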
+ val = (val << 12) | (val << 14);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val);
+
+ if (type < 3) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF,
+ (type + 1) << 4);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF,
+ (type + 1) << 4);
+ }
+
+ /* TODO use some definitions */
+ if (code == 0) {
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_OVER, 0xCFFF, 0);
+ if (type < 3) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFEC7, 0);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xEFDC, 0);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFFFE, 0);
+ udelay(20);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xFFFE, 0);
+ }
+ } else {
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_OVER, 0xCFFF,
+ 0x3000);
+ if (type < 3) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
+ 0xFEC7, 0x0180);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
+ 0xEFDC, (code << 1 | 0x1021));
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFFFE, 0x1);
+ udelay(20);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xFFFE, 0);
+ }
+ }
+}
+
+static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i;
+ u16 reg, val;
+
+ if (code == 0) {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, 0xFDFF);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, 0xFDFF);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C1, 0xFCFF);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C2, 0xFCFF);
+ b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S0, 0xFFDF);
+ b43_phy_mask(dev, B43_NPHY_TXF_40CO_B32S1, 0xFFDF);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0xFFC3);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0xFFC3);
+ } else {
+ for (i = 0; i < 2; i++) {
+ if ((code == 1 && i == 1) || (code == 2 && !i))
+ continue;
+
+ reg = (i == 0) ?
+ B43_NPHY_AFECTL_OVER1 : B43_NPHY_AFECTL_OVER;
+ b43_phy_maskset(dev, reg, 0xFDFF, 0x0200);
+
+ if (type < 3) {
+ reg = (i == 0) ?
+ B43_NPHY_AFECTL_C1 :
+ B43_NPHY_AFECTL_C2;
+ b43_phy_maskset(dev, reg, 0xFCFF, 0);
+
+ reg = (i == 0) ?
+ B43_NPHY_RFCTL_LUT_TRSW_UP1 :
+ B43_NPHY_RFCTL_LUT_TRSW_UP2;
+ b43_phy_maskset(dev, reg, 0xFFC3, 0);
+
+ if (type == 0)
+ val = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 4 : 8;
+ else if (type == 1)
+ val = 16;
+ else
+ val = 32;
+ b43_phy_set(dev, reg, val);
+
+ reg = (i == 0) ?
+ B43_NPHY_TXF_40CO_B1S0 :
+ B43_NPHY_TXF_40CO_B32S1;
+ b43_phy_set(dev, reg, 0x0020);
+ } else {
+ if (type == 6)
+ val = 0x0100;
+ else if (type == 3)
+ val = 0x0200;
+ else
+ val = 0x0300;
+
+ reg = (i == 0) ?
+ B43_NPHY_AFECTL_C1 :
+ B43_NPHY_AFECTL_C2;
+
+ b43_phy_maskset(dev, reg, 0xFCFF, val);
+ b43_phy_maskset(dev, reg, 0xF3FF, val << 2);
+
+ if (type != 3 && type != 6) {
+ enum ieee80211_band band =
+ b43_current_band(dev->wl);
+
+ if ((nphy->ipa2g_on &&
+ band == IEEE80211_BAND_2GHZ) ||
+ (nphy->ipa5g_on &&
+ band == IEEE80211_BAND_5GHZ))
+ val = (band == IEEE80211_BAND_5GHZ) ? 0xC : 0xE;
+ else
+ val = 0x11;
+ reg = (i == 0) ? 0x2000 : 0x3000;
+ reg |= B2055_PADDRV;
+ b43_radio_write16(dev, reg, val);
+
+ reg = (i == 0) ?
+ B43_NPHY_AFECTL_OVER1 :
+ B43_NPHY_AFECTL_OVER;
+ b43_phy_set(dev, reg, 0x0200);
+ }
+ }
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */
+static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
+{
+ if (dev->phy.rev >= 3)
+ b43_nphy_rev3_rssi_select(dev, code, type);
+ else
+ b43_nphy_rev2_rssi_select(dev, code, type);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */
+static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev, u8 type, u8 *buf)
+{
+ int i;
+ for (i = 0; i < 2; i++) {
+ if (type == 2) {
+ if (i == 0) {
+ b43_radio_maskset(dev, B2055_C1_B0NB_RSSIVCM,
+ 0xFC, buf[0]);
+ b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5,
+ 0xFC, buf[1]);
+ } else {
+ b43_radio_maskset(dev, B2055_C2_B0NB_RSSIVCM,
+ 0xFC, buf[2 * i]);
+ b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5,
+ 0xFC, buf[2 * i + 1]);
+ }
+ } else {
+ if (i == 0)
+ b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5,
+ 0xF3, buf[0] << 2);
+ else
+ b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5,
+ 0xF3, buf[2 * i + 1] << 2);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */
+static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
+ u8 nsamp)
+{
+ int i;
+ int out;
+ u16 save_regs_phy[9];
+ u16 s[2];
+
+ if (dev->phy.rev >= 3) {
+ save_regs_phy[0] = b43_phy_read(dev,
+ B43_NPHY_RFCTL_LUT_TRSW_UP1);
+ save_regs_phy[1] = b43_phy_read(dev,
+ B43_NPHY_RFCTL_LUT_TRSW_UP2);
+ save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
+ save_regs_phy[3] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
+ save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
+ save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0);
+ save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1);
+ }
+
+ b43_nphy_rssi_select(dev, 5, type);
+
+ if (dev->phy.rev < 2) {
+ save_regs_phy[8] = b43_phy_read(dev, B43_NPHY_GPIO_SEL);
+ b43_phy_write(dev, B43_NPHY_GPIO_SEL, 5);
+ }
+
+ for (i = 0; i < 4; i++)
+ buf[i] = 0;
+
+ for (i = 0; i < nsamp; i++) {
+ if (dev->phy.rev < 2) {
+ s[0] = b43_phy_read(dev, B43_NPHY_GPIO_LOOUT);
+ s[1] = b43_phy_read(dev, B43_NPHY_GPIO_HIOUT);
+ } else {
+ s[0] = b43_phy_read(dev, B43_NPHY_RSSI1);
+ s[1] = b43_phy_read(dev, B43_NPHY_RSSI2);
+ }
+
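+ /* Each RSSI reading is a signed 6-bit field; shifting it into the top
+ * of an s8 and arithmetic-shifting back sign-extends it before it is
+ * accumulated. */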
+ buf[0] += ((s8)((s[0] & 0x3F) << 2)) >> 2;
+ buf[1] += ((s8)(((s[0] >> 8) & 0x3F) << 2)) >> 2;
+ buf[2] += ((s8)((s[1] & 0x3F) << 2)) >> 2;
+ buf[3] += ((s8)(((s[1] >> 8) & 0x3F) << 2)) >> 2;
+ }
+ out = (buf[0] & 0xFF) << 24 | (buf[1] & 0xFF) << 16 |
+ (buf[2] & 0xFF) << 8 | (buf[3] & 0xFF);
+
+ if (dev->phy.rev < 2)
+ b43_phy_write(dev, B43_NPHY_GPIO_SEL, save_regs_phy[8]);
+
+ if (dev->phy.rev >= 3) {
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1,
+ save_regs_phy[0]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2,
+ save_regs_phy[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[2]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[3]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, save_regs_phy[4]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]);
+ }
+
+ return out;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */
+static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
+{
+ int i, j;
+ u8 state[4];
+ u8 code, val;
+ u16 class, override;
+ u8 regs_save_radio[2];
+ u16 regs_save_phy[2];
+ s8 offset[4];
+
+ u16 clip_state[2];
+ u16 clip_off[2] = { 0xFFFF, 0xFFFF };
+ s32 results_min[4] = { };
+ u8 vcm_final[4] = { };
+ s32 results[4][4] = { };
+ s32 miniq[4][2] = { };
+
+ if (type == 2) {
+ code = 0;
+ val = 6;
+ } else if (type < 2) {
+ code = 25;
+ val = 4;
+ } else {
+ B43_WARN_ON(1);
+ return;
+ }
+
+ class = b43_nphy_classifier(dev, 0, 0);
+ b43_nphy_classifier(dev, 7, 4);
+ b43_nphy_read_clip_detection(dev, clip_state);
+ b43_nphy_write_clip_detection(dev, clip_off);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ override = 0x140;
+ else
+ override = 0x110;
+
+ regs_save_phy[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+ regs_save_radio[0] = b43_radio_read16(dev, B2055_C1_PD_RXTX);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, override);
+ b43_radio_write16(dev, B2055_C1_PD_RXTX, val);
+
+ regs_save_phy[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+ regs_save_radio[1] = b43_radio_read16(dev, B2055_C2_PD_RXTX);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, override);
+ b43_radio_write16(dev, B2055_C2_PD_RXTX, val);
+
+ state[0] = b43_radio_read16(dev, B2055_C1_PD_RSSIMISC) & 0x07;
+ state[1] = b43_radio_read16(dev, B2055_C2_PD_RSSIMISC) & 0x07;
+ b43_radio_mask(dev, B2055_C1_PD_RSSIMISC, 0xF8);
+ b43_radio_mask(dev, B2055_C2_PD_RSSIMISC, 0xF8);
+ state[2] = b43_radio_read16(dev, B2055_C1_SP_RSSI) & 0x07;
+ state[3] = b43_radio_read16(dev, B2055_C2_SP_RSSI) & 0x07;
+
+ b43_nphy_rssi_select(dev, 5, type);
+ b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 0, type);
+ b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 1, type);
+
+ for (i = 0; i < 4; i++) {
+ u8 tmp[4];
+ for (j = 0; j < 4; j++)
+ tmp[j] = i;
+ if (type != 1)
+ b43_nphy_set_rssi_2055_vcm(dev, type, tmp);
+ b43_nphy_poll_rssi(dev, type, results[i], 8);
+ if (type < 2)
+ for (j = 0; j < 2; j++)
+ miniq[i][j] = min(results[i][2 * j],
+ results[i][2 * j + 1]);
+ }
+
+ for (i = 0; i < 4; i++) {
+ s32 mind = 40;
+ u8 minvcm = 0;
+ s32 minpoll = 249;
+ s32 curr;
+ for (j = 0; j < 4; j++) {
+ if (type == 2)
+ curr = abs(results[j][i]);
+ else
+ curr = abs(miniq[j][i / 2] - code * 8);
+
+ if (curr < mind) {
+ mind = curr;
+ minvcm = j;
+ }
+
+ if (results[j][i] < minpoll)
+ minpoll = results[j][i];
+ }
+ results_min[i] = minpoll;
+ vcm_final[i] = minvcm;
+ }
+
+ if (type != 1)
+ b43_nphy_set_rssi_2055_vcm(dev, type, vcm_final);
+
+ for (i = 0; i < 4; i++) {
+ offset[i] = (code * 8) - results[vcm_final[i]][i];
+
+ if (offset[i] < 0)
+ offset[i] = -((abs(offset[i]) + 4) / 8);
+ else
+ offset[i] = (offset[i] + 4) / 8;
+
+ if (results_min[i] == 248)
+ offset[i] = code - 32;
+
+ if (i % 2 == 0)
+ b43_nphy_scale_offset_rssi(dev, 0, offset[i], 1, 0,
+ type);
+ else
+ b43_nphy_scale_offset_rssi(dev, 0, offset[i], 2, 1,
+ type);
+ }
+
+ b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[0]);
+ b43_radio_maskset(dev, B2055_C2_PD_RSSIMISC, 0xF8, state[1]);
+
+ switch (state[2]) {
+ case 1:
+ b43_nphy_rssi_select(dev, 1, 2);
+ break;
+ case 4:
+ b43_nphy_rssi_select(dev, 1, 0);
+ break;
+ case 2:
+ b43_nphy_rssi_select(dev, 1, 1);
+ break;
+ default:
+ b43_nphy_rssi_select(dev, 1, 1);
+ break;
+ }
+
+ switch (state[3]) {
+ case 1:
+ b43_nphy_rssi_select(dev, 2, 2);
+ break;
+ case 4:
+ b43_nphy_rssi_select(dev, 2, 0);
+ break;
+ default:
+ b43_nphy_rssi_select(dev, 2, 1);
+ break;
+ }
+
+ b43_nphy_rssi_select(dev, 0, type);
+
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs_save_phy[0]);
+ b43_radio_write16(dev, B2055_C1_PD_RXTX, regs_save_radio[0]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs_save_phy[1]);
+ b43_radio_write16(dev, B2055_C2_PD_RXTX, regs_save_radio[1]);
+
+ b43_nphy_classifier(dev, 7, class);
+ b43_nphy_write_clip_detection(dev, clip_state);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */
+static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
+{
+ /* TODO */
+}
+
+/*
+ * RSSI Calibration
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal
+ */
+static void b43_nphy_rssi_cal(struct b43_wldev *dev)
+{
+ if (dev->phy.rev >= 3) {
+ b43_nphy_rev3_rssi_cal(dev);
+ } else {
+ b43_nphy_rev2_rssi_cal(dev, 2);
+ b43_nphy_rev2_rssi_cal(dev, 0);
+ b43_nphy_rev2_rssi_cal(dev, 1);
+ }
}
+/*
+ * Restore RSSI Calibration
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal
+ */
+static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u16 *rssical_radio_regs = NULL;
+ u16 *rssical_phy_regs = NULL;
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (!nphy->rssical_chanspec_2G)
+ return;
+ rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
+ rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
+ } else {
+ if (!nphy->rssical_chanspec_5G)
+ return;
+ rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G;
+ rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
+ }
+
+ /* TODO use some definitions */
+ b43_radio_maskset(dev, 0x602B, 0xE3, rssical_radio_regs[0]);
+ b43_radio_maskset(dev, 0x702B, 0xE3, rssical_radio_regs[1]);
+
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, rssical_phy_regs[0]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, rssical_phy_regs[1]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, rssical_phy_regs[2]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, rssical_phy_regs[3]);
+
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, rssical_phy_regs[4]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, rssical_phy_regs[5]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, rssical_phy_regs[6]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, rssical_phy_regs[7]);
+
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, rssical_phy_regs[8]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, rssical_phy_regs[9]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, rssical_phy_regs[10]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, rssical_phy_regs[11]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
+static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
+{
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (dev->phy.rev >= 6) {
+ /* TODO If the chip is 47162
+ return txpwrctrl_tx_gain_ipa_rev5 */
+ return txpwrctrl_tx_gain_ipa_rev6;
+ } else if (dev->phy.rev >= 5) {
+ return txpwrctrl_tx_gain_ipa_rev5;
+ } else {
+ return txpwrctrl_tx_gain_ipa;
+ }
+ } else {
+ return txpwrctrl_tx_gain_ipa_5g;
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */
+static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u16 *save = nphy->tx_rx_cal_radio_saveregs;
+ u16 tmp;
+ u8 offset, i;
+
+ if (dev->phy.rev >= 3) {
+ for (i = 0; i < 2; i++) {
+ tmp = (i == 0) ? 0x2000 : 0x3000;
+ offset = i * 11;
+
+ save[offset + 0] = b43_radio_read16(dev, B2055_CAL_RVARCTL);
+ save[offset + 1] = b43_radio_read16(dev, B2055_CAL_LPOCTL);
+ save[offset + 2] = b43_radio_read16(dev, B2055_CAL_TS);
+ save[offset + 3] = b43_radio_read16(dev, B2055_CAL_RCCALRTS);
+ save[offset + 4] = b43_radio_read16(dev, B2055_CAL_RCALRTS);
+ save[offset + 5] = b43_radio_read16(dev, B2055_PADDRV);
+ save[offset + 6] = b43_radio_read16(dev, B2055_XOCTL1);
+ save[offset + 7] = b43_radio_read16(dev, B2055_XOCTL2);
+ save[offset + 8] = b43_radio_read16(dev, B2055_XOREGUL);
+ save[offset + 9] = b43_radio_read16(dev, B2055_XOMISC);
+ save[offset + 10] = b43_radio_read16(dev, B2055_PLL_LFC1);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_radio_write16(dev, tmp | B2055_CAL_RVARCTL, 0x0A);
+ b43_radio_write16(dev, tmp | B2055_CAL_LPOCTL, 0x40);
+ b43_radio_write16(dev, tmp | B2055_CAL_TS, 0x55);
+ b43_radio_write16(dev, tmp | B2055_CAL_RCCALRTS, 0);
+ b43_radio_write16(dev, tmp | B2055_CAL_RCALRTS, 0);
+ if (nphy->ipa5g_on) {
+ b43_radio_write16(dev, tmp | B2055_PADDRV, 4);
+ b43_radio_write16(dev, tmp | B2055_XOCTL1, 1);
+ } else {
+ b43_radio_write16(dev, tmp | B2055_PADDRV, 0);
+ b43_radio_write16(dev, tmp | B2055_XOCTL1, 0x2F);
+ }
+ b43_radio_write16(dev, tmp | B2055_XOCTL2, 0);
+ } else {
+ b43_radio_write16(dev, tmp | B2055_CAL_RVARCTL, 0x06);
+ b43_radio_write16(dev, tmp | B2055_CAL_LPOCTL, 0x40);
+ b43_radio_write16(dev, tmp | B2055_CAL_TS, 0x55);
+ b43_radio_write16(dev, tmp | B2055_CAL_RCCALRTS, 0);
+ b43_radio_write16(dev, tmp | B2055_CAL_RCALRTS, 0);
+ b43_radio_write16(dev, tmp | B2055_XOCTL1, 0);
+ if (nphy->ipa2g_on) {
+ b43_radio_write16(dev, tmp | B2055_PADDRV, 6);
+ b43_radio_write16(dev, tmp | B2055_XOCTL2,
+ (dev->phy.rev < 5) ? 0x11 : 0x01);
+ } else {
+ b43_radio_write16(dev, tmp | B2055_PADDRV, 0);
+ b43_radio_write16(dev, tmp | B2055_XOCTL2, 0);
+ }
+ }
+ b43_radio_write16(dev, tmp | B2055_XOREGUL, 0);
+ b43_radio_write16(dev, tmp | B2055_XOMISC, 0);
+ b43_radio_write16(dev, tmp | B2055_PLL_LFC1, 0);
+ }
+ } else {
+ save[0] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL1);
+ b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL1, 0x29);
+
+ save[1] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL2);
+ b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL2, 0x54);
+
+ save[2] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL1);
+ b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL1, 0x29);
+
+ save[3] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL2);
+ b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL2, 0x54);
+
+ save[3] = b43_radio_read16(dev, B2055_C1_PWRDET_RXTX);
+ save[4] = b43_radio_read16(dev, B2055_C2_PWRDET_RXTX);
+
+ if (!(b43_phy_read(dev, B43_NPHY_BANDCTL) &
+ B43_NPHY_BANDCTL_5GHZ)) {
+ b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x04);
+ b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x04);
+ } else {
+ b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x20);
+ b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x20);
+ }
+
+ if (dev->phy.rev < 2) {
+ b43_radio_set(dev, B2055_C1_TX_BB_MXGM, 0x20);
+ b43_radio_set(dev, B2055_C2_TX_BB_MXGM, 0x20);
+ } else {
+ b43_radio_mask(dev, B2055_C1_TX_BB_MXGM, ~0x20);
+ b43_radio_mask(dev, B2055_C2_TX_BB_MXGM, ~0x20);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */
+static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
+ struct nphy_txgains target,
+ struct nphy_iqcal_params *params)
+{
+ int i, j, indx;
+ u16 gain;
+
+ if (dev->phy.rev >= 3) {
+ params->txgm = target.txgm[core];
+ params->pga = target.pga[core];
+ params->pad = target.pad[core];
+ params->ipa = target.ipa[core];
+ params->cal_gain = (params->txgm << 12) | (params->pga << 8) |
+ (params->pad << 4) | (params->ipa);
+ for (j = 0; j < 5; j++)
+ params->ncorr[j] = 0x79;
+ } else {
+ gain = (target.pad[core]) | (target.pga[core] << 4) |
+ (target.txgm[core] << 8);
+
+ indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
+ 1 : 0;
+ for (i = 0; i < 9; i++)
+ if (tbl_iqcal_gainparams[indx][i][0] == gain)
+ break;
+ i = min(i, 8);
+
+ params->txgm = tbl_iqcal_gainparams[indx][i][1];
+ params->pga = tbl_iqcal_gainparams[indx][i][2];
+ params->pad = tbl_iqcal_gainparams[indx][i][3];
+ params->cal_gain = (params->txgm << 7) | (params->pga << 4) |
+ (params->pad << 2);
+ for (j = 0; j < 4; j++)
+ params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j];
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */
+static void b43_nphy_update_tx_cal_ladder(struct b43_wldev *dev, u16 core)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i;
+ u16 scale, entry;
+
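+ /* txcal_bbmult apparently keeps core 0's BB multiplier in the upper
+ * byte and the other core's in the lower byte. */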
+ u16 tmp = nphy->txcal_bbmult;
+ if (core == 0)
+ tmp >>= 8;
+ tmp &= 0xff;
+
+ for (i = 0; i < 18; i++) {
+ scale = (ladder_lo[i].percent * tmp) / 100;
+ entry = ((scale & 0xFF) << 8) | ladder_lo[i].g_env;
+ b43_ntab_write(dev, B43_NTAB16(15, i), entry);
+
+ scale = (ladder_iq[i].percent * tmp) / 100;
+ entry = ((scale & 0xFF) << 8) | ladder_iq[i].g_env;
+ b43_ntab_write(dev, B43_NTAB16(15, i + 32), entry);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ExtPaSetTxDigiFilts */
+static void b43_nphy_ext_pa_set_tx_dig_filters(struct b43_wldev *dev)
+{
+ int i;
+ for (i = 0; i < 15; i++)
+ b43_phy_write(dev, B43_PHY_N(0x2C5 + i),
+ tbl_tx_filter_coef_rev4[2][i]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IpaSetTxDigiFilts */
+static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
+{
+ int i, j;
+ /* B43_NPHY_TXF_20CO_S0A1, B43_NPHY_TXF_40CO_S0A1, unknown */
+ u16 offset[] = { 0x186, 0x195, 0x2C5 };
+
+ for (i = 0; i < 3; i++)
+ for (j = 0; j < 15; j++)
+ b43_phy_write(dev, B43_PHY_N(offset[i] + j),
+ tbl_tx_filter_coef_rev4[i][j]);
+
+ if (dev->phy.is_40mhz) {
+ for (j = 0; j < 15; j++)
+ b43_phy_write(dev, B43_PHY_N(offset[0] + j),
+ tbl_tx_filter_coef_rev4[3][j]);
+ } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ for (j = 0; j < 15; j++)
+ b43_phy_write(dev, B43_PHY_N(offset[0] + j),
+ tbl_tx_filter_coef_rev4[5][j]);
+ }
+
+ if (dev->phy.channel == 14)
+ for (j = 0; j < 15; j++)
+ b43_phy_write(dev, B43_PHY_N(offset[0] + j),
+ tbl_tx_filter_coef_rev4[6][j]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetTxGain */
+static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u16 curr_gain[2];
+ struct nphy_txgains target;
+ const u32 *table = NULL;
+
+ if (nphy->txpwrctrl == 0) {
+ int i;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+ b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, curr_gain);
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+
+ for (i = 0; i < 2; ++i) {
+ if (dev->phy.rev >= 3) {
+ target.ipa[i] = curr_gain[i] & 0x000F;
+ target.pad[i] = (curr_gain[i] & 0x00F0) >> 4;
+ target.pga[i] = (curr_gain[i] & 0x0F00) >> 8;
+ target.txgm[i] = (curr_gain[i] & 0x7000) >> 12;
+ } else {
+ target.ipa[i] = curr_gain[i] & 0x0003;
+ target.pad[i] = (curr_gain[i] & 0x000C) >> 2;
+ target.pga[i] = (curr_gain[i] & 0x0070) >> 4;
+ target.txgm[i] = (curr_gain[i] & 0x0380) >> 7;
+ }
+ }
+ } else {
+ int i;
+ u16 index[2];
+ index[0] = (b43_phy_read(dev, B43_NPHY_C1_TXPCTL_STAT) &
+ B43_NPHY_TXPCTL_STAT_BIDX) >>
+ B43_NPHY_TXPCTL_STAT_BIDX_SHIFT;
+ index[1] = (b43_phy_read(dev, B43_NPHY_C2_TXPCTL_STAT) &
+ B43_NPHY_TXPCTL_STAT_BIDX) >>
+ B43_NPHY_TXPCTL_STAT_BIDX_SHIFT;
+
+ for (i = 0; i < 2; ++i) {
+ if (dev->phy.rev >= 3) {
+ enum ieee80211_band band =
+ b43_current_band(dev->wl);
+
+ if ((nphy->ipa2g_on &&
+ band == IEEE80211_BAND_2GHZ) ||
+ (nphy->ipa5g_on &&
+ band == IEEE80211_BAND_5GHZ)) {
+ table = b43_nphy_get_ipa_gain_table(dev);
+ } else {
+ if (band == IEEE80211_BAND_5GHZ) {
+ if (dev->phy.rev == 3)
+ table = b43_ntab_tx_gain_rev3_5ghz;
+ else if (dev->phy.rev == 4)
+ table = b43_ntab_tx_gain_rev4_5ghz;
+ else
+ table = b43_ntab_tx_gain_rev5plus_5ghz;
+ } else {
+ table = b43_ntab_tx_gain_rev3plus_2ghz;
+ }
+ }
+
+ target.ipa[i] = (table[index[i]] >> 16) & 0xF;
+ target.pad[i] = (table[index[i]] >> 20) & 0xF;
+ target.pga[i] = (table[index[i]] >> 24) & 0xF;
+ target.txgm[i] = (table[index[i]] >> 28) & 0xF;
+ } else {
+ table = b43_ntab_tx_gain_rev0_1_2;
+
+ target.ipa[i] = (table[index[i]] >> 16) & 0x3;
+ target.pad[i] = (table[index[i]] >> 18) & 0x3;
+ target.pga[i] = (table[index[i]] >> 20) & 0x7;
+ target.txgm[i] = (table[index[i]] >> 23) & 0x7;
+ }
+ }
+ }
+
+ return target;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhyCleanup */
+static void b43_nphy_tx_cal_phy_cleanup(struct b43_wldev *dev)
+{
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+
+ if (dev->phy.rev >= 3) {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[0]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[3]);
+ b43_phy_write(dev, B43_NPHY_BBCFG, regs[4]);
+ b43_ntab_write(dev, B43_NTAB16(8, 3), regs[5]);
+ b43_ntab_write(dev, B43_NTAB16(8, 19), regs[6]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[7]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[8]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
+ b43_nphy_reset_cca(dev);
+ } else {
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, regs[0]);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
+ b43_ntab_write(dev, B43_NTAB16(8, 2), regs[3]);
+ b43_ntab_write(dev, B43_NTAB16(8, 18), regs[4]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[5]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[6]);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhySetup */
+static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
+{
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+ u16 tmp;
+
+ regs[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
+ regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
+ if (dev->phy.rev >= 3) {
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0xF0FF, 0x0A00);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0xF0FF, 0x0A00);
+
+ tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
+ regs[2] = tmp;
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, tmp | 0x0600);
+
+ tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ regs[3] = tmp;
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp | 0x0600);
+
+ regs[4] = b43_phy_read(dev, B43_NPHY_BBCFG);
+ b43_phy_mask(dev, B43_NPHY_BBCFG, (u16)~B43_NPHY_BBCFG_RSTRX);
+
+ tmp = b43_ntab_read(dev, B43_NTAB16(8, 3));
+ regs[5] = tmp;
+ b43_ntab_write(dev, B43_NTAB16(8, 3), 0);
+
+ tmp = b43_ntab_read(dev, B43_NTAB16(8, 19));
+ regs[6] = tmp;
+ b43_ntab_write(dev, B43_NTAB16(8, 19), 0);
+ regs[7] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+ regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+
+ b43_nphy_rf_control_intc_override(dev, 2, 1, 3);
+ b43_nphy_rf_control_intc_override(dev, 1, 2, 1);
+ b43_nphy_rf_control_intc_override(dev, 1, 8, 2);
+
+ regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
+ regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
+ } else {
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, 0xA000);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, 0xA000);
+ tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ regs[2] = tmp;
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp | 0x3000);
+ tmp = b43_ntab_read(dev, B43_NTAB16(8, 2));
+ regs[3] = tmp;
+ tmp |= 0x2000;
+ b43_ntab_write(dev, B43_NTAB16(8, 2), tmp);
+ tmp = b43_ntab_read(dev, B43_NTAB16(8, 18));
+ regs[4] = tmp;
+ tmp |= 0x2000;
+ b43_ntab_write(dev, B43_NTAB16(8, 18), tmp);
+ regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+ regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ tmp = 0x0180;
+ else
+ tmp = 0x0120;
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SaveCal */
+static void b43_nphy_save_cal(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
+ u16 *txcal_radio_regs = NULL;
+ u8 *iqcal_chanspec;
+ u16 *table = NULL;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
+ txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
+ iqcal_chanspec = &nphy->iqcal_chanspec_2G;
+ table = nphy->cal_cache.txcal_coeffs_2G;
+ } else {
+ rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_5G;
+ txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_5G;
+ iqcal_chanspec = &nphy->iqcal_chanspec_5G;
+ table = nphy->cal_cache.txcal_coeffs_5G;
+ }
+
+ b43_nphy_rx_iq_coeffs(dev, false, rxcal_coeffs);
+ /* TODO use some definitions */
+ if (dev->phy.rev >= 3) {
+ txcal_radio_regs[0] = b43_radio_read(dev, 0x2021);
+ txcal_radio_regs[1] = b43_radio_read(dev, 0x2022);
+ txcal_radio_regs[2] = b43_radio_read(dev, 0x3021);
+ txcal_radio_regs[3] = b43_radio_read(dev, 0x3022);
+ txcal_radio_regs[4] = b43_radio_read(dev, 0x2023);
+ txcal_radio_regs[5] = b43_radio_read(dev, 0x2024);
+ txcal_radio_regs[6] = b43_radio_read(dev, 0x3023);
+ txcal_radio_regs[7] = b43_radio_read(dev, 0x3024);
+ } else {
+ txcal_radio_regs[0] = b43_radio_read(dev, 0x8B);
+ txcal_radio_regs[1] = b43_radio_read(dev, 0xBA);
+ txcal_radio_regs[2] = b43_radio_read(dev, 0x8D);
+ txcal_radio_regs[3] = b43_radio_read(dev, 0xBC);
+ }
+ *iqcal_chanspec = nphy->radio_chanspec;
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 8, table);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreCal */
+static void b43_nphy_restore_cal(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u16 coef[4];
+ u16 *loft = NULL;
+ u16 *table = NULL;
+
+ int i;
+ u16 *txcal_radio_regs = NULL;
+ struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (nphy->iqcal_chanspec_2G == 0)
+ return;
+ table = nphy->cal_cache.txcal_coeffs_2G;
+ loft = &nphy->cal_cache.txcal_coeffs_2G[5];
+ } else {
+ if (nphy->iqcal_chanspec_5G == 0)
+ return;
+ table = nphy->cal_cache.txcal_coeffs_5G;
+ loft = &nphy->cal_cache.txcal_coeffs_5G[5];
+ }
+
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 4, table);
+
+ for (i = 0; i < 4; i++) {
+ if (dev->phy.rev >= 3)
+ table[i] = coef[i];
+ else
+ coef[i] = 0;
+ }
+
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4, coef);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2, loft);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2, loft);
+
+ if (dev->phy.rev < 2)
+ b43_nphy_tx_iq_workaround(dev);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
+ rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
+ } else {
+ txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_5G;
+ rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_5G;
+ }
+
+ /* TODO use some definitions */
+ if (dev->phy.rev >= 3) {
+ b43_radio_write(dev, 0x2021, txcal_radio_regs[0]);
+ b43_radio_write(dev, 0x2022, txcal_radio_regs[1]);
+ b43_radio_write(dev, 0x3021, txcal_radio_regs[2]);
+ b43_radio_write(dev, 0x3022, txcal_radio_regs[3]);
+ b43_radio_write(dev, 0x2023, txcal_radio_regs[4]);
+ b43_radio_write(dev, 0x2024, txcal_radio_regs[5]);
+ b43_radio_write(dev, 0x3023, txcal_radio_regs[6]);
+ b43_radio_write(dev, 0x3024, txcal_radio_regs[7]);
+ } else {
+ b43_radio_write(dev, 0x8B, txcal_radio_regs[0]);
+ b43_radio_write(dev, 0xBA, txcal_radio_regs[1]);
+ b43_radio_write(dev, 0x8D, txcal_radio_regs[2]);
+ b43_radio_write(dev, 0xBC, txcal_radio_regs[3]);
+ }
+ b43_nphy_rx_iq_coeffs(dev, true, rxcal_coeffs);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalTxIqlo */
+static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
+ struct nphy_txgains target,
+ bool full, bool mphase)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i;
+ int error = 0;
+ int freq;
+ bool avoid = false;
+ u8 length;
+ u16 tmp, core, type, count, max, numb, last, cmd;
+ const u16 *table;
+ bool phy6or5x;
+
+ u16 buffer[11];
+ u16 diq_start = 0;
+ u16 save[2];
+ u16 gain[2];
+ struct nphy_iqcal_params params[2];
+ bool updated[2] = { };
+
+ b43_nphy_stay_in_carrier_search(dev, true);
+
+ if (dev->phy.rev >= 4) {
+ avoid = nphy->hang_avoid;
+ nphy->hang_avoid = 0;
+ }
+
+ b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, save);
+
+ for (i = 0; i < 2; i++) {
+ b43_nphy_iq_cal_gain_params(dev, i, target, &params[i]);
+ gain[i] = params[i].cal_gain;
+ }
+
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, gain);
+
+ b43_nphy_tx_cal_radio_setup(dev);
+ b43_nphy_tx_cal_phy_setup(dev);
+
+ phy6or5x = dev->phy.rev >= 6 ||
+ (dev->phy.rev == 5 && nphy->ipa2g_on &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ);
+ if (phy6or5x) {
+ if (dev->phy.is_40mhz) {
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18,
+ tbl_tx_iqlo_cal_loft_ladder_40);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 32), 18,
+ tbl_tx_iqlo_cal_iqimb_ladder_40);
+ } else {
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18,
+ tbl_tx_iqlo_cal_loft_ladder_20);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 32), 18,
+ tbl_tx_iqlo_cal_iqimb_ladder_20);
+ }
+ }
+
+ b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8AA9);
+
+ if (!dev->phy.is_40mhz)
+ freq = 2500;
+ else
+ freq = 5000;
+
+ if (nphy->mphase_cal_phase_id > 2)
+ b43_nphy_run_samples(dev, (dev->phy.is_40mhz ? 40 : 20) * 8,
+ 0xFFFF, 0, true, false);
+ else
+ error = b43_nphy_tx_tone(dev, freq, 250, true, false);
+
+ if (error == 0) {
+ if (nphy->mphase_cal_phase_id > 2) {
+ table = nphy->mphase_txcal_bestcoeffs;
+ length = 11;
+ if (dev->phy.rev < 3)
+ length -= 2;
+ } else {
+ if (!full && nphy->txiqlocal_coeffsvalid) {
+ table = nphy->txiqlocal_bestc;
+ length = 11;
+ if (dev->phy.rev < 3)
+ length -= 2;
+ } else {
+ full = true;
+ if (dev->phy.rev >= 3) {
+ table = tbl_tx_iqlo_cal_startcoefs_nphyrev3;
+ length = B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3;
+ } else {
+ table = tbl_tx_iqlo_cal_startcoefs;
+ length = B43_NTAB_TX_IQLO_CAL_STARTCOEFS;
+ }
+ }
+ }
+
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 64), length, table);
+
+ if (full) {
+ if (dev->phy.rev >= 3)
+ max = B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL_REV3;
+ else
+ max = B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL;
+ } else {
+ if (dev->phy.rev >= 3)
+ max = B43_NTAB_TX_IQLO_CAL_CMDS_RECAL_REV3;
+ else
+ max = B43_NTAB_TX_IQLO_CAL_CMDS_RECAL;
+ }
+
+ if (mphase) {
+ count = nphy->mphase_txcal_cmdidx;
+ numb = min(max,
+ (u16)(count + nphy->mphase_txcal_numcmds));
+ } else {
+ count = 0;
+ numb = max;
+ }
+
+ for (; count < numb; count++) {
+ if (full) {
+ if (dev->phy.rev >= 3)
+ cmd = tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[count];
+ else
+ cmd = tbl_tx_iqlo_cal_cmds_fullcal[count];
+ } else {
+ if (dev->phy.rev >= 3)
+ cmd = tbl_tx_iqlo_cal_cmds_recal_nphyrev3[count];
+ else
+ cmd = tbl_tx_iqlo_cal_cmds_recal[count];
+ }
+
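+ /*
+  * Each calibration command word appears to encode its target core in
+  * bits 13:12 and the measurement type in bits 11:8, as the masks in
+  * the decode below suggest.
+  */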
+ core = (cmd & 0x3000) >> 12;
+ type = (cmd & 0x0F00) >> 8;
+
+ if (phy6or5x && updated[core] == 0) {
+ b43_nphy_update_tx_cal_ladder(dev, core);
+ updated[core] = 1;
+ }
+
+ tmp = (params[core].ncorr[type] << 8) | 0x66;
+ b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDNNUM, tmp);
+
+ if (type == 1 || type == 3 || type == 4) {
+ buffer[0] = b43_ntab_read(dev,
+ B43_NTAB16(15, 69 + core));
+ diq_start = buffer[0];
+ buffer[0] = 0;
+ b43_ntab_write(dev, B43_NTAB16(15, 69 + core),
+ 0);
+ }
+
+ b43_phy_write(dev, B43_NPHY_IQLOCAL_CMD, cmd);
+ for (i = 0; i < 2000; i++) {
+ tmp = b43_phy_read(dev, B43_NPHY_IQLOCAL_CMD);
+ if (tmp & 0xC000)
+ break;
+ udelay(10);
+ }
+
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
+ buffer);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 64), length,
+ buffer);
+
+ if (type == 1 || type == 3 || type == 4)
+ buffer[0] = diq_start;
+ }
+
+ if (mphase)
+ nphy->mphase_txcal_cmdidx = (numb >= max) ? 0 : numb;
+
+ last = (dev->phy.rev < 3) ? 6 : 7;
+
+ if (!mphase || nphy->mphase_cal_phase_id == last) {
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 96), 4, buffer);
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 4, buffer);
+ if (dev->phy.rev < 3) {
+ buffer[0] = 0;
+ buffer[1] = 0;
+ buffer[2] = 0;
+ buffer[3] = 0;
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4,
+ buffer);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 101), 2,
+ buffer);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2,
+ buffer);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2,
+ buffer);
+ length = 11;
+ if (dev->phy.rev < 3)
+ length -= 2;
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
+ nphy->txiqlocal_bestc);
+ nphy->txiqlocal_coeffsvalid = true;
+ /* TODO: Set nphy->txiqlocal_chanspec to
+ the current channel */
+ } else {
+ length = 11;
+ if (dev->phy.rev < 3)
+ length -= 2;
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
+ nphy->mphase_txcal_bestcoeffs);
+ }
+
+ b43_nphy_stop_playback(dev);
+ b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0);
+ }
+
+ b43_nphy_tx_cal_phy_cleanup(dev);
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, save);
+
+ if (dev->phy.rev < 2 && (!mphase || nphy->mphase_cal_phase_id == last))
+ b43_nphy_tx_iq_workaround(dev);
+
+ if (dev->phy.rev >= 4)
+ nphy->hang_avoid = avoid;
+
+ b43_nphy_stay_in_carrier_search(dev, false);
+
+ return error;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ReapplyTxCalCoeffs */
+static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i;
+ u16 buffer[7];
+ bool equal = true;
+
+ if (!nphy->txiqlocal_coeffsvalid || 1 /* FIXME */)
+ return;
+
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
+ for (i = 0; i < 4; i++) {
+ if (buffer[i] != nphy->txiqlocal_bestc[i]) {
+ equal = false;
+ break;
+ }
+ }
+
+ if (!equal) {
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 4,
+ nphy->txiqlocal_bestc);
+ for (i = 0; i < 4; i++)
+ buffer[i] = 0;
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4,
+ buffer);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2,
+ &nphy->txiqlocal_bestc[5]);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2,
+ &nphy->txiqlocal_bestc[5]);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIqRev2 */
+static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
+ struct nphy_txgains target, u8 type, bool debug)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i, j, index;
+ u8 rfctl[2];
+ u8 afectl_core;
+ u16 tmp[6];
+ u16 cur_hpf1, cur_hpf2, cur_lna;
+ u32 real, imag;
+ enum ieee80211_band band;
+
+ u8 use;
+ u16 cur_hpf;
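+ /*
+  * Three candidate LNA/HPF gain settings are measured first (j = 0..2);
+  * the final pass (j = 3) picks one of them, trims the HPF gain towards
+  * the desired power and computes the RX IQ compensation.
+  */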
+ u16 lna[3] = { 3, 3, 1 };
+ u16 hpf1[3] = { 7, 2, 0 };
+ u16 hpf2[3] = { 2, 0, 0 };
+ u32 power[3] = { };
+ u16 gain_save[2];
+ u16 cal_gain[2];
+ struct nphy_iqcal_params cal_params[2];
+ struct nphy_iq_est est;
+ int ret = 0;
+ bool playtone = true;
+ int desired = 13;
+
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ if (dev->phy.rev < 2)
+ b43_nphy_reapply_tx_cal_coeffs(dev);
+ b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, gain_save);
+ for (i = 0; i < 2; i++) {
+ b43_nphy_iq_cal_gain_params(dev, i, target, &cal_params[i]);
+ cal_gain[i] = cal_params[i].cal_gain;
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, cal_gain);
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0) {
+ rfctl[0] = B43_NPHY_RFCTL_INTC1;
+ rfctl[1] = B43_NPHY_RFCTL_INTC2;
+ afectl_core = B43_NPHY_AFECTL_C1;
+ } else {
+ rfctl[0] = B43_NPHY_RFCTL_INTC2;
+ rfctl[1] = B43_NPHY_RFCTL_INTC1;
+ afectl_core = B43_NPHY_AFECTL_C2;
+ }
+
+ tmp[1] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
+ tmp[2] = b43_phy_read(dev, afectl_core);
+ tmp[3] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ tmp[4] = b43_phy_read(dev, rfctl[0]);
+ tmp[5] = b43_phy_read(dev, rfctl[1]);
+
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
+ (u16)~B43_NPHY_RFSEQCA_RXDIS,
+ ((1 - i) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
+ (1 - i));
+ b43_phy_set(dev, afectl_core, 0x0006);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0006);
+
+ band = b43_current_band(dev->wl);
+
+ if (nphy->rxcalparams & 0xFF000000) {
+ if (band == IEEE80211_BAND_5GHZ)
+ b43_phy_write(dev, rfctl[0], 0x140);
+ else
+ b43_phy_write(dev, rfctl[0], 0x110);
+ } else {
+ if (band == IEEE80211_BAND_5GHZ)
+ b43_phy_write(dev, rfctl[0], 0x180);
+ else
+ b43_phy_write(dev, rfctl[0], 0x120);
+ }
+
+ if (band == IEEE80211_BAND_5GHZ)
+ b43_phy_write(dev, rfctl[1], 0x148);
+ else
+ b43_phy_write(dev, rfctl[1], 0x114);
+
+ if (nphy->rxcalparams & 0x10000) {
+ b43_radio_maskset(dev, B2055_C1_GENSPARE2, 0xFC,
+ (i + 1));
+ b43_radio_maskset(dev, B2055_C2_GENSPARE2, 0xFC,
+ (2 - i));
+ }
+
+ for (j = 0; j < 4; j++) {
+ if (j < 3) {
+ cur_lna = lna[j];
+ cur_hpf1 = hpf1[j];
+ cur_hpf2 = hpf2[j];
+ } else {
+ if (power[1] > 10000) {
+ use = 1;
+ cur_hpf = cur_hpf1;
+ index = 2;
+ } else {
+ if (power[0] > 10000) {
+ use = 1;
+ cur_hpf = cur_hpf1;
+ index = 1;
+ } else {
+ index = 0;
+ use = 2;
+ cur_hpf = cur_hpf2;
+ }
+ }
+ cur_lna = lna[index];
+ cur_hpf1 = hpf1[index];
+ cur_hpf2 = hpf2[index];
+ cur_hpf += desired - hweight32(power[index]);
+ cur_hpf = clamp_val(cur_hpf, 0, 10);
+ if (use == 1)
+ cur_hpf1 = cur_hpf;
+ else
+ cur_hpf2 = cur_hpf;
+ }
+
+ tmp[0] = ((cur_hpf2 << 8) | (cur_hpf1 << 4) |
+ (cur_lna << 2));
+ b43_nphy_rf_control_override(dev, 0x400, tmp[0], 3,
+ false);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ b43_nphy_stop_playback(dev);
+
+ if (playtone) {
+ ret = b43_nphy_tx_tone(dev, 4000,
+ (nphy->rxcalparams & 0xFFFF),
+ false, false);
+ playtone = false;
+ } else {
+ b43_nphy_run_samples(dev, 160, 0xFFFF, 0,
+ false, false);
+ }
+
+ if (ret == 0) {
+ if (j < 3) {
+ b43_nphy_rx_iq_est(dev, &est, 1024, 32,
+ false);
+ if (i == 0) {
+ real = est.i0_pwr;
+ imag = est.q0_pwr;
+ } else {
+ real = est.i1_pwr;
+ imag = est.q1_pwr;
+ }
+ power[i] = ((real + imag) / 1024) + 1;
+ } else {
+ b43_nphy_calc_rx_iq_comp(dev, 1 << i);
+ }
+ b43_nphy_stop_playback(dev);
+ }
+
+ if (ret != 0)
+ break;
+ }
+
+ b43_radio_mask(dev, B2055_C1_GENSPARE2, 0xFC);
+ b43_radio_mask(dev, B2055_C2_GENSPARE2, 0xFC);
+ b43_phy_write(dev, rfctl[1], tmp[5]);
+ b43_phy_write(dev, rfctl[0], tmp[4]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp[3]);
+ b43_phy_write(dev, afectl_core, tmp[2]);
+ b43_phy_write(dev, B43_NPHY_RFSEQCA, tmp[1]);
+
+ if (ret != 0)
+ break;
+ }
+
+ b43_nphy_rf_control_override(dev, 0x400, 0, 3, true);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, gain_save);
+
+ b43_nphy_stay_in_carrier_search(dev, 0);
+
+ return ret;
+}
+
+static int b43_nphy_rev3_cal_rx_iq(struct b43_wldev *dev,
+ struct nphy_txgains target, u8 type, bool debug)
+{
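+ /* TODO: RX IQ calibration for PHY revs >= 3 is not implemented yet */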
+ return -1;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIq */
+static int b43_nphy_cal_rx_iq(struct b43_wldev *dev,
+ struct nphy_txgains target, u8 type, bool debug)
+{
+ if (dev->phy.rev >= 3)
+ return b43_nphy_rev3_cal_rx_iq(dev, target, type, debug);
+ else
+ return b43_nphy_rev2_cal_rx_iq(dev, target, type, debug);
+}
+
+/*
+ * Init N-PHY
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N
+ */
int b43_phy_initn(struct b43_wldev *dev)
{
+ struct ssb_bus *bus = dev->dev->bus;
struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = phy->n;
+ u8 tx_pwr_state;
+ struct nphy_txgains target;
u16 tmp;
+ enum ieee80211_band tmp2;
+ bool do_rssi_cal;
- //TODO: Spectral management
+ u16 clip[2];
+ bool do_cal = false;
+
+ if ((dev->phy.rev >= 3) &&
+ (bus->sprom.boardflags_lo & B43_BFL_EXTLNA) &&
+ (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) {
+ chipco_set32(&dev->dev->bus->chipco, SSB_CHIPCO_CHIPCTL, 0x40);
+ }
+ nphy->deaf_count = 0;
b43_nphy_tables_init(dev);
+ nphy->crsminpwr_adjusted = false;
+ nphy->noisevars_adjusted = false;
/* Clear all overrides */
- b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0);
+ if (dev->phy.rev >= 3) {
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, 0);
+ b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, 0);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, 0);
+ } else {
+ b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0);
+ }
b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, 0);
b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, 0);
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC3, 0);
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC4, 0);
+ if (dev->phy.rev < 6) {
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC3, 0);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC4, 0);
+ }
b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
~(B43_NPHY_RFSEQMODE_CAOVER |
B43_NPHY_RFSEQMODE_TROVER));
+ if (dev->phy.rev >= 3)
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, 0);
b43_phy_write(dev, B43_NPHY_AFECTL_OVER, 0);
- tmp = (phy->rev < 2) ? 64 : 59;
- b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
- ~B43_NPHY_BPHY_CTL3_SCALE,
- tmp << B43_NPHY_BPHY_CTL3_SCALE_SHIFT);
-
+ if (dev->phy.rev <= 2) {
+ tmp = (dev->phy.rev == 2) ? 0x3B : 0x40;
+ b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
+ ~B43_NPHY_BPHY_CTL3_SCALE,
+ tmp << B43_NPHY_BPHY_CTL3_SCALE_SHIFT);
+ }
b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_20M, 0x20);
b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_40M, 0x20);
- b43_phy_write(dev, B43_NPHY_TXREALFD, 184);
- b43_phy_write(dev, B43_NPHY_MIMO_CRSTXEXT, 200);
- b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 80);
- b43_phy_write(dev, B43_NPHY_C2_BCLIPBKOFF, 511);
+ if (bus->sprom.boardflags2_lo & 0x100 ||
+ (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE &&
+ bus->boardinfo.type == 0x8B))
+ b43_phy_write(dev, B43_NPHY_TXREALFD, 0xA0);
+ else
+ b43_phy_write(dev, B43_NPHY_TXREALFD, 0xB8);
+ b43_phy_write(dev, B43_NPHY_MIMO_CRSTXEXT, 0xC8);
+ b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x50);
+ b43_phy_write(dev, B43_NPHY_TXRIFS_FRDEL, 0x30);
- //TODO MIMO-Config
- //TODO Update TX/RX chain
+ b43_nphy_update_mimo_config(dev, nphy->preamble_override);
+ b43_nphy_update_txrx_chain(dev);
if (phy->rev < 2) {
b43_phy_write(dev, B43_NPHY_DUP40_GFBL, 0xAA8);
b43_phy_write(dev, B43_NPHY_DUP40_BL, 0x9A4);
}
+
+ tmp2 = b43_current_band(dev->wl);
+ if ((nphy->ipa2g_on && tmp2 == IEEE80211_BAND_2GHZ) ||
+ (nphy->ipa5g_on && tmp2 == IEEE80211_BAND_5GHZ)) {
+ b43_phy_set(dev, B43_NPHY_PAPD_EN0, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ0, 0x007F,
+ nphy->papd_epsilon_offset[0] << 7);
+ b43_phy_set(dev, B43_NPHY_PAPD_EN1, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ1, 0x007F,
+ nphy->papd_epsilon_offset[1] << 7);
+ b43_nphy_int_pa_set_tx_dig_filters(dev);
+ } else if (phy->rev >= 5) {
+ b43_nphy_ext_pa_set_tx_dig_filters(dev);
+ }
+
b43_nphy_workarounds(dev);
- b43_nphy_reset_cca(dev);
- ssb_write32(dev->dev, SSB_TMSLOW,
- ssb_read32(dev->dev, SSB_TMSLOW) | B43_TMSLOW_MACPHYCLKEN);
+ /* Reset CCA; the init-time sequence differs slightly from the standard CCA reset */
+ b43_nphy_bmac_clock_fgc(dev, 1);
+ tmp = b43_phy_read(dev, B43_NPHY_BBCFG);
+ b43_phy_write(dev, B43_NPHY_BBCFG, tmp | B43_NPHY_BBCFG_RSTCCA);
+ b43_phy_write(dev, B43_NPHY_BBCFG, tmp & ~B43_NPHY_BBCFG_RSTCCA);
+ b43_nphy_bmac_clock_fgc(dev, 0);
+
+ /* TODO N PHY MAC PHY Clock Set with argument 1 */
+
+ b43_nphy_pa_override(dev, false);
b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ b43_nphy_pa_override(dev, true);
+
+ b43_nphy_classifier(dev, 0, 0);
+ b43_nphy_read_clip_detection(dev, clip);
+ tx_pwr_state = nphy->txpwrctrl;
+ /* TODO N PHY TX power control with argument 0
+ (turning off power control) */
+ /* TODO Fix the TX Power Settings */
+ /* TODO N PHY TX Power Control Idle TSSI */
+ /* TODO N PHY TX Power Control Setup */
+
+ if (phy->rev >= 3) {
+ /* TODO */
+ } else {
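+ /* Revs 0-2 use one shared TX gain table for both cores (26 and 27) */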
+ b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128,
+ b43_ntab_tx_gain_rev0_1_2);
+ b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128,
+ b43_ntab_tx_gain_rev0_1_2);
+ }
+
+ if (nphy->phyrxchain != 3)
+ ;/* TODO N PHY RX Core Set State with phyrxchain as argument */
+ if (nphy->mphase_cal_phase_id > 0)
+ ;/* TODO PHY Periodic Calibration Multi-Phase Restart */
+
+ do_rssi_cal = false;
+ if (phy->rev >= 3) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ do_rssi_cal = (nphy->rssical_chanspec_2G == 0);
+ else
+ do_rssi_cal = (nphy->rssical_chanspec_5G == 0);
+
+ if (do_rssi_cal)
+ b43_nphy_rssi_cal(dev);
+ else
+ b43_nphy_restore_rssi_cal(dev);
+ } else {
+ b43_nphy_rssi_cal(dev);
+ }
+
+ if (!(nphy->measure_hold & 0x6)) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ do_cal = (nphy->iqcal_chanspec_2G == 0);
+ else
+ do_cal = (nphy->iqcal_chanspec_5G == 0);
+
+ if (nphy->mute)
+ do_cal = false;
+
+ if (do_cal) {
+ target = b43_nphy_get_tx_gains(dev);
+
+ if (nphy->antsel_type == 2)
+ ;/*TODO NPHY Superswitch Init with argument 1*/
+ if (nphy->perical != 2) {
+ b43_nphy_rssi_cal(dev);
+ if (phy->rev >= 3) {
+ nphy->cal_orig_pwr_idx[0] =
+ nphy->txpwrindex[0].index_internal;
+ nphy->cal_orig_pwr_idx[1] =
+ nphy->txpwrindex[1].index_internal;
+ /* TODO N PHY Pre Calibrate TX Gain */
+ target = b43_nphy_get_tx_gains(dev);
+ }
+ }
+ }
+ }
+
+ if (!b43_nphy_cal_tx_iq_lo(dev, target, true, false)) {
+ if (b43_nphy_cal_rx_iq(dev, target, 2, 0) == 0)
+ b43_nphy_save_cal(dev);
+ else if (nphy->mphase_cal_phase_id == 0)
+ ;/* N PHY Periodic Calibration with argument 3 */
+ } else {
+ b43_nphy_restore_cal(dev);
+ }
- b43_phy_read(dev, B43_NPHY_CLASSCTL); /* dummy read */
- //TODO read core1/2 clip1 thres regs
-
- if (1 /* FIXME Band is 2.4GHz */)
- b43_nphy_bphy_init(dev);
- //TODO disable TX power control
- //TODO Fix the TX power settings
- //TODO Init periodic calibration with reason 3
- b43_nphy_rssi_cal(dev, 2);
- b43_nphy_rssi_cal(dev, 0);
- b43_nphy_rssi_cal(dev, 1);
- //TODO get TX gain
- //TODO init superswitch
- //TODO calibrate LO
- //TODO idle TSSI TX pctl
- //TODO TX power control power setup
- //TODO table writes
- //TODO TX power control coefficients
- //TODO enable TX power control
- //TODO control antenna selection
- //TODO init radar detection
- //TODO reset channel if changed
+ b43_nphy_tx_pwr_ctrl_coef_setup(dev);
+ /* TODO N PHY TX Power Control Enable with argument tx_pwr_state */
+ b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015);
+ b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320);
+ if (phy->rev >= 3 && phy->rev <= 6)
+ b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0014);
+ b43_nphy_tx_lp_fbw(dev);
+ if (phy->rev >= 3)
+ b43_nphy_spur_workaround(dev);
b43err(dev->wl, "IEEE 802.11n devices are not supported, yet.\n");
return 0;
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index 1749aef4147d..403aad3f894f 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -231,6 +231,7 @@
#define B43_NPHY_C2_TXIQ_COMP_OFF B43_PHY_N(0x088) /* Core 2 TX I/Q comp offset */
#define B43_NPHY_C1_TXCTL B43_PHY_N(0x08B) /* Core 1 TX control */
#define B43_NPHY_C2_TXCTL B43_PHY_N(0x08C) /* Core 2 TX control */
+#define B43_NPHY_AFECTL_OVER1 B43_PHY_N(0x08F) /* AFE control override 1 */
#define B43_NPHY_SCRAM_SIGCTL B43_PHY_N(0x090) /* Scram signal control */
#define B43_NPHY_SCRAM_SIGCTL_INITST 0x007F /* Initial state value */
#define B43_NPHY_SCRAM_SIGCTL_INITST_SHIFT 0
@@ -705,6 +706,10 @@
#define B43_NPHY_TXPCTL_INIT B43_PHY_N(0x222) /* TX power control init */
#define B43_NPHY_TXPCTL_INIT_PIDXI1 0x00FF /* Power index init 1 */
#define B43_NPHY_TXPCTL_INIT_PIDXI1_SHIFT 0
+#define B43_NPHY_PAPD_EN0 B43_PHY_N(0x297) /* PAPD Enable0 TBD */
+#define B43_NPHY_EPS_TABLE_ADJ0 B43_PHY_N(0x298) /* EPS Table Adj0 TBD */
+#define B43_NPHY_PAPD_EN1 B43_PHY_N(0x29B) /* PAPD Enable1 TBD */
+#define B43_NPHY_EPS_TABLE_ADJ1 B43_PHY_N(0x29C) /* EPS Table Adj1 TBD */
@@ -919,8 +924,99 @@
struct b43_wldev;
+struct b43_phy_n_iq_comp {
+ s16 a0;
+ s16 b0;
+ s16 a1;
+ s16 b1;
+};
+
+struct b43_phy_n_rssical_cache {
+ u16 rssical_radio_regs_2G[2];
+ u16 rssical_phy_regs_2G[12];
+
+ u16 rssical_radio_regs_5G[2];
+ u16 rssical_phy_regs_5G[12];
+};
+
+struct b43_phy_n_cal_cache {
+ u16 txcal_radio_regs_2G[8];
+ u16 txcal_coeffs_2G[8];
+ struct b43_phy_n_iq_comp rxcal_coeffs_2G;
+
+ u16 txcal_radio_regs_5G[8];
+ u16 txcal_coeffs_5G[8];
+ struct b43_phy_n_iq_comp rxcal_coeffs_5G;
+};
+
+struct b43_phy_n_txpwrindex {
+ s8 index;
+ s8 index_internal;
+ s8 index_internal_save;
+ u16 AfectrlOverride;
+ u16 AfeCtrlDacGain;
+ u16 rad_gain;
+ u8 bbmult;
+ u16 iqcomp_a;
+ u16 iqcomp_b;
+ u16 locomp;
+};
+
struct b43_phy_n {
- //TODO lots of missing stuff
+ u8 antsel_type;
+ u8 cal_orig_pwr_idx[2];
+ u8 measure_hold;
+ u8 phyrxchain;
+ u8 perical;
+ u32 deaf_count;
+ u32 rxcalparams;
+ bool hang_avoid;
+ bool mute;
+ u16 papd_epsilon_offset[2];
+ s32 preamble_override;
+ u32 bb_mult_save;
+ u16 radio_chanspec;
+
+ bool gain_boost;
+ bool elna_gain_config;
+ bool band5g_pwrgain;
+
+ u8 mphase_cal_phase_id;
+ u16 mphase_txcal_cmdidx;
+ u16 mphase_txcal_numcmds;
+ u16 mphase_txcal_bestcoeffs[11];
+
+ u8 txpwrctrl;
+ u16 txcal_bbmult;
+ u16 txiqlocal_bestc[11];
+ bool txiqlocal_coeffsvalid;
+ struct b43_phy_n_txpwrindex txpwrindex[2];
+
+ u8 txrx_chain;
+ u16 tx_rx_cal_phy_saveregs[11];
+ u16 tx_rx_cal_radio_saveregs[22];
+
+ u16 rfctrl_intc1_save;
+ u16 rfctrl_intc2_save;
+
+ u16 classifier_state;
+ u16 clip_state[2];
+
+ bool aband_spurwar_en;
+ bool gband_spurwar_en;
+
+ bool ipa2g_on;
+ u8 iqcal_chanspec_2G;
+ u8 rssical_chanspec_2G;
+
+ bool ipa5g_on;
+ u8 iqcal_chanspec_5G;
+ u8 rssical_chanspec_5G;
+
+ struct b43_phy_n_rssical_cache rssical_cache;
+ struct b43_phy_n_cal_cache cal_cache;
+ bool crsminpwr_adjusted;
+ bool noisevars_adjusted;
};
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index c01b8e02412f..a6062c3e89a5 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -559,7 +559,6 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
b43err(dev->wl, "PIO transmission failure\n");
goto out;
}
- q->nr_tx_packets++;
B43_WARN_ON(q->buffer_used > q->buffer_size);
if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
@@ -605,22 +604,6 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
}
}
-void b43_pio_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- const int nr_queues = dev->wl->hw->queues;
- struct b43_pio_txqueue *q;
- int i;
-
- for (i = 0; i < nr_queues; i++) {
- q = select_queue_by_priority(dev, i);
-
- stats[i].len = B43_PIO_MAX_NR_TXPACKETS - q->free_packet_slots;
- stats[i].limit = B43_PIO_MAX_NR_TXPACKETS;
- stats[i].count = q->nr_tx_packets;
- }
-}
-
/* Returns whether we should fetch another frame. */
static bool pio_rx_frame(struct b43_pio_rxqueue *q)
{
diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h
index 7dd649c9ddad..1e516147424f 100644
--- a/drivers/net/wireless/b43/pio.h
+++ b/drivers/net/wireless/b43/pio.h
@@ -55,8 +55,6 @@
#define B43_PIO_MAX_NR_TXPACKETS 32
-#ifdef CONFIG_B43_PIO
-
struct b43_pio_txpacket {
/* Pointer to the TX queue we belong to. */
struct b43_pio_txqueue *queue;
@@ -92,9 +90,6 @@ struct b43_pio_txqueue {
struct b43_pio_txpacket packets[B43_PIO_MAX_NR_TXPACKETS];
struct list_head packets_list;
- /* Total number of transmitted packets. */
- unsigned int nr_tx_packets;
-
/* Shortcut to the 802.11 core revision. This is to
* avoid horrible pointer dereferencing in the fastpaths. */
u8 rev;
@@ -162,49 +157,9 @@ void b43_pio_free(struct b43_wldev *dev);
int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb);
void b43_pio_handle_txstatus(struct b43_wldev *dev,
const struct b43_txstatus *status);
-void b43_pio_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats);
void b43_pio_rx(struct b43_pio_rxqueue *q);
void b43_pio_tx_suspend(struct b43_wldev *dev);
void b43_pio_tx_resume(struct b43_wldev *dev);
-
-#else /* CONFIG_B43_PIO */
-
-
-static inline int b43_pio_init(struct b43_wldev *dev)
-{
- return 0;
-}
-static inline void b43_pio_free(struct b43_wldev *dev)
-{
-}
-static inline void b43_pio_stop(struct b43_wldev *dev)
-{
-}
-static inline int b43_pio_tx(struct b43_wldev *dev,
- struct sk_buff *skb)
-{
- return 0;
-}
-static inline void b43_pio_handle_txstatus(struct b43_wldev *dev,
- const struct b43_txstatus *status)
-{
-}
-static inline void b43_pio_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
-}
-static inline void b43_pio_rx(struct b43_pio_rxqueue *q)
-{
-}
-static inline void b43_pio_tx_suspend(struct b43_wldev *dev)
-{
-}
-static inline void b43_pio_tx_resume(struct b43_wldev *dev)
-{
-}
-
-#endif /* CONFIG_B43_PIO */
#endif /* B43_PIO_H_ */
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 4e2336315545..a00d509150f7 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -1336,7 +1336,7 @@ b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel)
}
-const u8 b43_ntab_adjustpower0[] = {
+static const u8 b43_ntab_adjustpower0[] = {
0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03,
0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05,
@@ -1355,7 +1355,7 @@ const u8 b43_ntab_adjustpower0[] = {
0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F,
};
-const u8 b43_ntab_adjustpower1[] = {
+static const u8 b43_ntab_adjustpower1[] = {
0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03,
0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05,
@@ -1374,11 +1374,11 @@ const u8 b43_ntab_adjustpower1[] = {
0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F,
};
-const u16 b43_ntab_bdi[] = {
+static const u16 b43_ntab_bdi[] = {
0x0070, 0x0126, 0x012C, 0x0246, 0x048D, 0x04D2,
};
-const u32 b43_ntab_channelest[] = {
+static const u32 b43_ntab_channelest[] = {
0x44444444, 0x44444444, 0x44444444, 0x44444444,
0x44444444, 0x44444444, 0x44444444, 0x44444444,
0x10101010, 0x10101010, 0x10101010, 0x10101010,
@@ -1405,7 +1405,7 @@ const u32 b43_ntab_channelest[] = {
0x10101010, 0x10101010, 0x10101010, 0x10101010,
};
-const u8 b43_ntab_estimatepowerlt0[] = {
+static const u8 b43_ntab_estimatepowerlt0[] = {
0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49,
0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41,
0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39,
@@ -1416,7 +1416,7 @@ const u8 b43_ntab_estimatepowerlt0[] = {
0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11,
};
-const u8 b43_ntab_estimatepowerlt1[] = {
+static const u8 b43_ntab_estimatepowerlt1[] = {
0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49,
0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41,
0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39,
@@ -1427,14 +1427,14 @@ const u8 b43_ntab_estimatepowerlt1[] = {
0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11,
};
-const u8 b43_ntab_framelookup[] = {
+static const u8 b43_ntab_framelookup[] = {
0x02, 0x04, 0x14, 0x14, 0x03, 0x05, 0x16, 0x16,
0x0A, 0x0C, 0x1C, 0x1C, 0x0B, 0x0D, 0x1E, 0x1E,
0x06, 0x08, 0x18, 0x18, 0x07, 0x09, 0x1A, 0x1A,
0x0E, 0x10, 0x20, 0x28, 0x0F, 0x11, 0x22, 0x2A,
};
-const u32 b43_ntab_framestruct[] = {
+static const u32 b43_ntab_framestruct[] = {
0x08004A04, 0x00100000, 0x01000A05, 0x00100020,
0x09804506, 0x00100030, 0x09804507, 0x00100030,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -1645,7 +1645,7 @@ const u32 b43_ntab_framestruct[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
-const u32 b43_ntab_gainctl0[] = {
+static const u32 b43_ntab_gainctl0[] = {
0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E,
0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C,
0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A,
@@ -1680,7 +1680,7 @@ const u32 b43_ntab_gainctl0[] = {
0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00,
};
-const u32 b43_ntab_gainctl1[] = {
+static const u32 b43_ntab_gainctl1[] = {
0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E,
0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C,
0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A,
@@ -1715,12 +1715,12 @@ const u32 b43_ntab_gainctl1[] = {
0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00,
};
-const u32 b43_ntab_intlevel[] = {
+static const u32 b43_ntab_intlevel[] = {
0x00802070, 0x0671188D, 0x0A60192C, 0x0A300E46,
0x00C1188D, 0x080024D2, 0x00000070,
};
-const u32 b43_ntab_iqlt0[] = {
+static const u32 b43_ntab_iqlt0[] = {
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
@@ -1755,7 +1755,7 @@ const u32 b43_ntab_iqlt0[] = {
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
};
-const u32 b43_ntab_iqlt1[] = {
+static const u32 b43_ntab_iqlt1[] = {
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
@@ -1790,7 +1790,7 @@ const u32 b43_ntab_iqlt1[] = {
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
};
-const u16 b43_ntab_loftlt0[] = {
+static const u16 b43_ntab_loftlt0[] = {
0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103,
0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
@@ -1815,7 +1815,7 @@ const u16 b43_ntab_loftlt0[] = {
0x0002, 0x0103,
};
-const u16 b43_ntab_loftlt1[] = {
+static const u16 b43_ntab_loftlt1[] = {
0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103,
0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
@@ -1840,7 +1840,7 @@ const u16 b43_ntab_loftlt1[] = {
0x0002, 0x0103,
};
-const u8 b43_ntab_mcs[] = {
+static const u8 b43_ntab_mcs[] = {
0x00, 0x08, 0x0A, 0x10, 0x12, 0x19, 0x1A, 0x1C,
0x40, 0x48, 0x4A, 0x50, 0x52, 0x59, 0x5A, 0x5C,
0x80, 0x88, 0x8A, 0x90, 0x92, 0x99, 0x9A, 0x9C,
@@ -1859,7 +1859,7 @@ const u8 b43_ntab_mcs[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
-const u32 b43_ntab_noisevar10[] = {
+static const u32 b43_ntab_noisevar10[] = {
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
@@ -1926,7 +1926,7 @@ const u32 b43_ntab_noisevar10[] = {
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
};
-const u32 b43_ntab_noisevar11[] = {
+static const u32 b43_ntab_noisevar11[] = {
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
@@ -1993,7 +1993,7 @@ const u32 b43_ntab_noisevar11[] = {
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
};
-const u16 b43_ntab_pilot[] = {
+static const u16 b43_ntab_pilot[] = {
0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08,
0xFF08, 0xFF08, 0x80D5, 0x80D5, 0x80D5, 0x80D5,
0x80D5, 0x80D5, 0x80D5, 0x80D5, 0xFF0A, 0xFF82,
@@ -2011,12 +2011,12 @@ const u16 b43_ntab_pilot[] = {
0xF0A0, 0xF028, 0xFFFF, 0xFFFF,
};
-const u32 b43_ntab_pilotlt[] = {
+static const u32 b43_ntab_pilotlt[] = {
0x76540123, 0x62407351, 0x76543201, 0x76540213,
0x76540123, 0x76430521,
};
-const u32 b43_ntab_tdi20a0[] = {
+static const u32 b43_ntab_tdi20a0[] = {
0x00091226, 0x000A1429, 0x000B56AD, 0x000C58B0,
0x000D5AB3, 0x000E9CB6, 0x000F9EBA, 0x0000C13D,
0x00020301, 0x00030504, 0x00040708, 0x0005090B,
@@ -2033,7 +2033,7 @@ const u32 b43_ntab_tdi20a0[] = {
0x00000000, 0x00000000, 0x00000000,
};
-const u32 b43_ntab_tdi20a1[] = {
+static const u32 b43_ntab_tdi20a1[] = {
0x00014B26, 0x00028D29, 0x000393AD, 0x00049630,
0x0005D833, 0x0006DA36, 0x00099C3A, 0x000A9E3D,
0x000BC081, 0x000CC284, 0x000DC488, 0x000F068B,
@@ -2050,7 +2050,7 @@ const u32 b43_ntab_tdi20a1[] = {
0x00000000, 0x00000000, 0x00000000,
};
-const u32 b43_ntab_tdi40a0[] = {
+static const u32 b43_ntab_tdi40a0[] = {
0x0011A346, 0x00136CCF, 0x0014F5D9, 0x001641E2,
0x0017CB6B, 0x00195475, 0x001B2383, 0x001CAD0C,
0x001E7616, 0x0000821F, 0x00020BA8, 0x0003D4B2,
@@ -2081,7 +2081,7 @@ const u32 b43_ntab_tdi40a0[] = {
0x00000000, 0x00000000,
};
-const u32 b43_ntab_tdi40a1[] = {
+static const u32 b43_ntab_tdi40a1[] = {
0x001EDB36, 0x000129CA, 0x0002B353, 0x00047CDD,
0x0005C8E6, 0x000791EF, 0x00091BF9, 0x000AAA07,
0x000C3391, 0x000DFD1A, 0x00120923, 0x0013D22D,
@@ -2112,7 +2112,7 @@ const u32 b43_ntab_tdi40a1[] = {
0x00000000, 0x00000000,
};
-const u32 b43_ntab_tdtrn[] = {
+static const u32 b43_ntab_tdtrn[] = {
0x061C061C, 0x0050EE68, 0xF592FE36, 0xFE5212F6,
0x00000C38, 0xFE5212F6, 0xF592FE36, 0x0050EE68,
0x061C061C, 0xEE680050, 0xFE36F592, 0x12F6FE52,
@@ -2291,7 +2291,7 @@ const u32 b43_ntab_tdtrn[] = {
0xFA58FC00, 0x0B64FC7E, 0x0800F7B6, 0x00F006BE,
};
-const u32 b43_ntab_tmap[] = {
+static const u32 b43_ntab_tmap[] = {
0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888,
0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8,
0xF1111110, 0x11111111, 0x11F11111, 0x00000111,
@@ -2406,6 +2406,544 @@ const u32 b43_ntab_tmap[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+const u32 b43_ntab_tx_gain_rev0_1_2[] = {
+ 0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
+ 0x03cc2944, 0x03c82b44, 0x03c82b42, 0x03c82a44,
+ 0x03c82a42, 0x03c82944, 0x03c82942, 0x03c82844,
+ 0x03c82842, 0x03c42b44, 0x03c42b42, 0x03c42a44,
+ 0x03c42a42, 0x03c42944, 0x03c42942, 0x03c42844,
+ 0x03c42842, 0x03c42744, 0x03c42742, 0x03c42644,
+ 0x03c42642, 0x03c42544, 0x03c42542, 0x03c42444,
+ 0x03c42442, 0x03c02b44, 0x03c02b42, 0x03c02a44,
+ 0x03c02a42, 0x03c02944, 0x03c02942, 0x03c02844,
+ 0x03c02842, 0x03c02744, 0x03c02742, 0x03b02b44,
+ 0x03b02b42, 0x03b02a44, 0x03b02a42, 0x03b02944,
+ 0x03b02942, 0x03b02844, 0x03b02842, 0x03b02744,
+ 0x03b02742, 0x03b02644, 0x03b02642, 0x03b02544,
+ 0x03b02542, 0x03a02b44, 0x03a02b42, 0x03a02a44,
+ 0x03a02a42, 0x03a02944, 0x03a02942, 0x03a02844,
+ 0x03a02842, 0x03a02744, 0x03a02742, 0x03902b44,
+ 0x03902b42, 0x03902a44, 0x03902a42, 0x03902944,
+ 0x03902942, 0x03902844, 0x03902842, 0x03902744,
+ 0x03902742, 0x03902644, 0x03902642, 0x03902544,
+ 0x03902542, 0x03802b44, 0x03802b42, 0x03802a44,
+ 0x03802a42, 0x03802944, 0x03802942, 0x03802844,
+ 0x03802842, 0x03802744, 0x03802742, 0x03802644,
+ 0x03802642, 0x03802544, 0x03802542, 0x03802444,
+ 0x03802442, 0x03802344, 0x03802342, 0x03802244,
+ 0x03802242, 0x03802144, 0x03802142, 0x03802044,
+ 0x03802042, 0x03801f44, 0x03801f42, 0x03801e44,
+ 0x03801e42, 0x03801d44, 0x03801d42, 0x03801c44,
+ 0x03801c42, 0x03801b44, 0x03801b42, 0x03801a44,
+ 0x03801a42, 0x03801944, 0x03801942, 0x03801844,
+ 0x03801842, 0x03801744, 0x03801742, 0x03801644,
+ 0x03801642, 0x03801544, 0x03801542, 0x03801444,
+ 0x03801442, 0x03801344, 0x03801342, 0x00002b00,
+};
+
+const u32 b43_ntab_tx_gain_rev3plus_2ghz[] = {
+ 0x1f410044, 0x1f410042, 0x1f410040, 0x1f41003e,
+ 0x1f41003c, 0x1f41003b, 0x1f410039, 0x1f410037,
+ 0x1e410044, 0x1e410042, 0x1e410040, 0x1e41003e,
+ 0x1e41003c, 0x1e41003b, 0x1e410039, 0x1e410037,
+ 0x1d410044, 0x1d410042, 0x1d410040, 0x1d41003e,
+ 0x1d41003c, 0x1d41003b, 0x1d410039, 0x1d410037,
+ 0x1c410044, 0x1c410042, 0x1c410040, 0x1c41003e,
+ 0x1c41003c, 0x1c41003b, 0x1c410039, 0x1c410037,
+ 0x1b410044, 0x1b410042, 0x1b410040, 0x1b41003e,
+ 0x1b41003c, 0x1b41003b, 0x1b410039, 0x1b410037,
+ 0x1a410044, 0x1a410042, 0x1a410040, 0x1a41003e,
+ 0x1a41003c, 0x1a41003b, 0x1a410039, 0x1a410037,
+ 0x19410044, 0x19410042, 0x19410040, 0x1941003e,
+ 0x1941003c, 0x1941003b, 0x19410039, 0x19410037,
+ 0x18410044, 0x18410042, 0x18410040, 0x1841003e,
+ 0x1841003c, 0x1841003b, 0x18410039, 0x18410037,
+ 0x17410044, 0x17410042, 0x17410040, 0x1741003e,
+ 0x1741003c, 0x1741003b, 0x17410039, 0x17410037,
+ 0x16410044, 0x16410042, 0x16410040, 0x1641003e,
+ 0x1641003c, 0x1641003b, 0x16410039, 0x16410037,
+ 0x15410044, 0x15410042, 0x15410040, 0x1541003e,
+ 0x1541003c, 0x1541003b, 0x15410039, 0x15410037,
+ 0x14410044, 0x14410042, 0x14410040, 0x1441003e,
+ 0x1441003c, 0x1441003b, 0x14410039, 0x14410037,
+ 0x13410044, 0x13410042, 0x13410040, 0x1341003e,
+ 0x1341003c, 0x1341003b, 0x13410039, 0x13410037,
+ 0x12410044, 0x12410042, 0x12410040, 0x1241003e,
+ 0x1241003c, 0x1241003b, 0x12410039, 0x12410037,
+ 0x11410044, 0x11410042, 0x11410040, 0x1141003e,
+ 0x1141003c, 0x1141003b, 0x11410039, 0x11410037,
+ 0x10410044, 0x10410042, 0x10410040, 0x1041003e,
+ 0x1041003c, 0x1041003b, 0x10410039, 0x10410037,
+};
+
+const u32 b43_ntab_tx_gain_rev3_5ghz[] = {
+ 0xcff70044, 0xcff70042, 0xcff70040, 0xcff7003e,
+ 0xcff7003c, 0xcff7003b, 0xcff70039, 0xcff70037,
+ 0xcef70044, 0xcef70042, 0xcef70040, 0xcef7003e,
+ 0xcef7003c, 0xcef7003b, 0xcef70039, 0xcef70037,
+ 0xcdf70044, 0xcdf70042, 0xcdf70040, 0xcdf7003e,
+ 0xcdf7003c, 0xcdf7003b, 0xcdf70039, 0xcdf70037,
+ 0xccf70044, 0xccf70042, 0xccf70040, 0xccf7003e,
+ 0xccf7003c, 0xccf7003b, 0xccf70039, 0xccf70037,
+ 0xcbf70044, 0xcbf70042, 0xcbf70040, 0xcbf7003e,
+ 0xcbf7003c, 0xcbf7003b, 0xcbf70039, 0xcbf70037,
+ 0xcaf70044, 0xcaf70042, 0xcaf70040, 0xcaf7003e,
+ 0xcaf7003c, 0xcaf7003b, 0xcaf70039, 0xcaf70037,
+ 0xc9f70044, 0xc9f70042, 0xc9f70040, 0xc9f7003e,
+ 0xc9f7003c, 0xc9f7003b, 0xc9f70039, 0xc9f70037,
+ 0xc8f70044, 0xc8f70042, 0xc8f70040, 0xc8f7003e,
+ 0xc8f7003c, 0xc8f7003b, 0xc8f70039, 0xc8f70037,
+ 0xc7f70044, 0xc7f70042, 0xc7f70040, 0xc7f7003e,
+ 0xc7f7003c, 0xc7f7003b, 0xc7f70039, 0xc7f70037,
+ 0xc6f70044, 0xc6f70042, 0xc6f70040, 0xc6f7003e,
+ 0xc6f7003c, 0xc6f7003b, 0xc6f70039, 0xc6f70037,
+ 0xc5f70044, 0xc5f70042, 0xc5f70040, 0xc5f7003e,
+ 0xc5f7003c, 0xc5f7003b, 0xc5f70039, 0xc5f70037,
+ 0xc4f70044, 0xc4f70042, 0xc4f70040, 0xc4f7003e,
+ 0xc4f7003c, 0xc4f7003b, 0xc4f70039, 0xc4f70037,
+ 0xc3f70044, 0xc3f70042, 0xc3f70040, 0xc3f7003e,
+ 0xc3f7003c, 0xc3f7003b, 0xc3f70039, 0xc3f70037,
+ 0xc2f70044, 0xc2f70042, 0xc2f70040, 0xc2f7003e,
+ 0xc2f7003c, 0xc2f7003b, 0xc2f70039, 0xc2f70037,
+ 0xc1f70044, 0xc1f70042, 0xc1f70040, 0xc1f7003e,
+ 0xc1f7003c, 0xc1f7003b, 0xc1f70039, 0xc1f70037,
+ 0xc0f70044, 0xc0f70042, 0xc0f70040, 0xc0f7003e,
+ 0xc0f7003c, 0xc0f7003b, 0xc0f70039, 0xc0f70037,
+};
+
+const u32 b43_ntab_tx_gain_rev4_5ghz[] = {
+ 0x2ff20044, 0x2ff20042, 0x2ff20040, 0x2ff2003e,
+ 0x2ff2003c, 0x2ff2003b, 0x2ff20039, 0x2ff20037,
+ 0x2ef20044, 0x2ef20042, 0x2ef20040, 0x2ef2003e,
+ 0x2ef2003c, 0x2ef2003b, 0x2ef20039, 0x2ef20037,
+ 0x2df20044, 0x2df20042, 0x2df20040, 0x2df2003e,
+ 0x2df2003c, 0x2df2003b, 0x2df20039, 0x2df20037,
+ 0x2cf20044, 0x2cf20042, 0x2cf20040, 0x2cf2003e,
+ 0x2cf2003c, 0x2cf2003b, 0x2cf20039, 0x2cf20037,
+ 0x2bf20044, 0x2bf20042, 0x2bf20040, 0x2bf2003e,
+ 0x2bf2003c, 0x2bf2003b, 0x2bf20039, 0x2bf20037,
+ 0x2af20044, 0x2af20042, 0x2af20040, 0x2af2003e,
+ 0x2af2003c, 0x2af2003b, 0x2af20039, 0x2af20037,
+ 0x29f20044, 0x29f20042, 0x29f20040, 0x29f2003e,
+ 0x29f2003c, 0x29f2003b, 0x29f20039, 0x29f20037,
+ 0x28f20044, 0x28f20042, 0x28f20040, 0x28f2003e,
+ 0x28f2003c, 0x28f2003b, 0x28f20039, 0x28f20037,
+ 0x27f20044, 0x27f20042, 0x27f20040, 0x27f2003e,
+ 0x27f2003c, 0x27f2003b, 0x27f20039, 0x27f20037,
+ 0x26f20044, 0x26f20042, 0x26f20040, 0x26f2003e,
+ 0x26f2003c, 0x26f2003b, 0x26f20039, 0x26f20037,
+ 0x25f20044, 0x25f20042, 0x25f20040, 0x25f2003e,
+ 0x25f2003c, 0x25f2003b, 0x25f20039, 0x25f20037,
+ 0x24f20044, 0x24f20042, 0x24f20040, 0x24f2003e,
+ 0x24f2003c, 0x24f2003b, 0x24f20039, 0x24f20038,
+ 0x23f20041, 0x23f20040, 0x23f2003f, 0x23f2003e,
+ 0x23f2003c, 0x23f2003b, 0x23f20039, 0x23f20037,
+ 0x22f20044, 0x22f20042, 0x22f20040, 0x22f2003e,
+ 0x22f2003c, 0x22f2003b, 0x22f20039, 0x22f20037,
+ 0x21f20044, 0x21f20042, 0x21f20040, 0x21f2003e,
+ 0x21f2003c, 0x21f2003b, 0x21f20039, 0x21f20037,
+ 0x20d20043, 0x20d20041, 0x20d2003e, 0x20d2003c,
+ 0x20d2003a, 0x20d20038, 0x20d20036, 0x20d20034,
+};
+
+const u32 b43_ntab_tx_gain_rev5plus_5ghz[] = {
+ 0x0f62004a, 0x0f620048, 0x0f620046, 0x0f620044,
+ 0x0f620042, 0x0f620040, 0x0f62003e, 0x0f62003c,
+ 0x0e620044, 0x0e620042, 0x0e620040, 0x0e62003e,
+ 0x0e62003c, 0x0e62003d, 0x0e62003b, 0x0e62003a,
+ 0x0d620043, 0x0d620041, 0x0d620040, 0x0d62003e,
+ 0x0d62003d, 0x0d62003c, 0x0d62003b, 0x0d62003a,
+ 0x0c620041, 0x0c620040, 0x0c62003f, 0x0c62003e,
+ 0x0c62003c, 0x0c62003b, 0x0c620039, 0x0c620037,
+ 0x0b620046, 0x0b620044, 0x0b620042, 0x0b620040,
+ 0x0b62003e, 0x0b62003c, 0x0b62003b, 0x0b62003a,
+ 0x0a620041, 0x0a620040, 0x0a62003e, 0x0a62003c,
+ 0x0a62003b, 0x0a62003a, 0x0a620039, 0x0a620038,
+ 0x0962003e, 0x0962003d, 0x0962003c, 0x0962003b,
+ 0x09620039, 0x09620037, 0x09620035, 0x09620033,
+ 0x08620044, 0x08620042, 0x08620040, 0x0862003e,
+ 0x0862003c, 0x0862003b, 0x0862003a, 0x08620039,
+ 0x07620043, 0x07620042, 0x07620040, 0x0762003f,
+ 0x0762003d, 0x0762003b, 0x0762003a, 0x07620039,
+ 0x0662003e, 0x0662003d, 0x0662003c, 0x0662003b,
+ 0x06620039, 0x06620037, 0x06620035, 0x06620033,
+ 0x05620046, 0x05620044, 0x05620042, 0x05620040,
+ 0x0562003e, 0x0562003c, 0x0562003b, 0x05620039,
+ 0x04620044, 0x04620042, 0x04620040, 0x0462003e,
+ 0x0462003c, 0x0462003b, 0x04620039, 0x04620038,
+ 0x0362003c, 0x0362003b, 0x0362003a, 0x03620039,
+ 0x03620038, 0x03620037, 0x03620035, 0x03620033,
+ 0x0262004c, 0x0262004a, 0x02620048, 0x02620047,
+ 0x02620046, 0x02620044, 0x02620043, 0x02620042,
+ 0x0162004a, 0x01620048, 0x01620046, 0x01620044,
+ 0x01620043, 0x01620042, 0x01620041, 0x01620040,
+ 0x00620042, 0x00620040, 0x0062003e, 0x0062003c,
+ 0x0062003b, 0x00620039, 0x00620037, 0x00620035,
+};
+
+const u32 txpwrctrl_tx_gain_ipa[] = {
+ 0x5ff7002d, 0x5ff7002b, 0x5ff7002a, 0x5ff70029,
+ 0x5ff70028, 0x5ff70027, 0x5ff70026, 0x5ff70025,
+ 0x5ef7002d, 0x5ef7002b, 0x5ef7002a, 0x5ef70029,
+ 0x5ef70028, 0x5ef70027, 0x5ef70026, 0x5ef70025,
+ 0x5df7002d, 0x5df7002b, 0x5df7002a, 0x5df70029,
+ 0x5df70028, 0x5df70027, 0x5df70026, 0x5df70025,
+ 0x5cf7002d, 0x5cf7002b, 0x5cf7002a, 0x5cf70029,
+ 0x5cf70028, 0x5cf70027, 0x5cf70026, 0x5cf70025,
+ 0x5bf7002d, 0x5bf7002b, 0x5bf7002a, 0x5bf70029,
+ 0x5bf70028, 0x5bf70027, 0x5bf70026, 0x5bf70025,
+ 0x5af7002d, 0x5af7002b, 0x5af7002a, 0x5af70029,
+ 0x5af70028, 0x5af70027, 0x5af70026, 0x5af70025,
+ 0x59f7002d, 0x59f7002b, 0x59f7002a, 0x59f70029,
+ 0x59f70028, 0x59f70027, 0x59f70026, 0x59f70025,
+ 0x58f7002d, 0x58f7002b, 0x58f7002a, 0x58f70029,
+ 0x58f70028, 0x58f70027, 0x58f70026, 0x58f70025,
+ 0x57f7002d, 0x57f7002b, 0x57f7002a, 0x57f70029,
+ 0x57f70028, 0x57f70027, 0x57f70026, 0x57f70025,
+ 0x56f7002d, 0x56f7002b, 0x56f7002a, 0x56f70029,
+ 0x56f70028, 0x56f70027, 0x56f70026, 0x56f70025,
+ 0x55f7002d, 0x55f7002b, 0x55f7002a, 0x55f70029,
+ 0x55f70028, 0x55f70027, 0x55f70026, 0x55f70025,
+ 0x54f7002d, 0x54f7002b, 0x54f7002a, 0x54f70029,
+ 0x54f70028, 0x54f70027, 0x54f70026, 0x54f70025,
+ 0x53f7002d, 0x53f7002b, 0x53f7002a, 0x53f70029,
+ 0x53f70028, 0x53f70027, 0x53f70026, 0x53f70025,
+ 0x52f7002d, 0x52f7002b, 0x52f7002a, 0x52f70029,
+ 0x52f70028, 0x52f70027, 0x52f70026, 0x52f70025,
+ 0x51f7002d, 0x51f7002b, 0x51f7002a, 0x51f70029,
+ 0x51f70028, 0x51f70027, 0x51f70026, 0x51f70025,
+ 0x50f7002d, 0x50f7002b, 0x50f7002a, 0x50f70029,
+ 0x50f70028, 0x50f70027, 0x50f70026, 0x50f70025,
+};
+
+const u32 txpwrctrl_tx_gain_ipa_rev5[] = {
+ 0x1ff7002d, 0x1ff7002b, 0x1ff7002a, 0x1ff70029,
+ 0x1ff70028, 0x1ff70027, 0x1ff70026, 0x1ff70025,
+ 0x1ef7002d, 0x1ef7002b, 0x1ef7002a, 0x1ef70029,
+ 0x1ef70028, 0x1ef70027, 0x1ef70026, 0x1ef70025,
+ 0x1df7002d, 0x1df7002b, 0x1df7002a, 0x1df70029,
+ 0x1df70028, 0x1df70027, 0x1df70026, 0x1df70025,
+ 0x1cf7002d, 0x1cf7002b, 0x1cf7002a, 0x1cf70029,
+ 0x1cf70028, 0x1cf70027, 0x1cf70026, 0x1cf70025,
+ 0x1bf7002d, 0x1bf7002b, 0x1bf7002a, 0x1bf70029,
+ 0x1bf70028, 0x1bf70027, 0x1bf70026, 0x1bf70025,
+ 0x1af7002d, 0x1af7002b, 0x1af7002a, 0x1af70029,
+ 0x1af70028, 0x1af70027, 0x1af70026, 0x1af70025,
+ 0x19f7002d, 0x19f7002b, 0x19f7002a, 0x19f70029,
+ 0x19f70028, 0x19f70027, 0x19f70026, 0x19f70025,
+ 0x18f7002d, 0x18f7002b, 0x18f7002a, 0x18f70029,
+ 0x18f70028, 0x18f70027, 0x18f70026, 0x18f70025,
+ 0x17f7002d, 0x17f7002b, 0x17f7002a, 0x17f70029,
+ 0x17f70028, 0x17f70027, 0x17f70026, 0x17f70025,
+ 0x16f7002d, 0x16f7002b, 0x16f7002a, 0x16f70029,
+ 0x16f70028, 0x16f70027, 0x16f70026, 0x16f70025,
+ 0x15f7002d, 0x15f7002b, 0x15f7002a, 0x15f70029,
+ 0x15f70028, 0x15f70027, 0x15f70026, 0x15f70025,
+ 0x14f7002d, 0x14f7002b, 0x14f7002a, 0x14f70029,
+ 0x14f70028, 0x14f70027, 0x14f70026, 0x14f70025,
+ 0x13f7002d, 0x13f7002b, 0x13f7002a, 0x13f70029,
+ 0x13f70028, 0x13f70027, 0x13f70026, 0x13f70025,
+ 0x12f7002d, 0x12f7002b, 0x12f7002a, 0x12f70029,
+ 0x12f70028, 0x12f70027, 0x12f70026, 0x12f70025,
+ 0x11f7002d, 0x11f7002b, 0x11f7002a, 0x11f70029,
+ 0x11f70028, 0x11f70027, 0x11f70026, 0x11f70025,
+ 0x10f7002d, 0x10f7002b, 0x10f7002a, 0x10f70029,
+ 0x10f70028, 0x10f70027, 0x10f70026, 0x10f70025,
+};
+
+const u32 txpwrctrl_tx_gain_ipa_rev6[] = {
+ 0x0ff7002d, 0x0ff7002b, 0x0ff7002a, 0x0ff70029,
+ 0x0ff70028, 0x0ff70027, 0x0ff70026, 0x0ff70025,
+ 0x0ef7002d, 0x0ef7002b, 0x0ef7002a, 0x0ef70029,
+ 0x0ef70028, 0x0ef70027, 0x0ef70026, 0x0ef70025,
+ 0x0df7002d, 0x0df7002b, 0x0df7002a, 0x0df70029,
+ 0x0df70028, 0x0df70027, 0x0df70026, 0x0df70025,
+ 0x0cf7002d, 0x0cf7002b, 0x0cf7002a, 0x0cf70029,
+ 0x0cf70028, 0x0cf70027, 0x0cf70026, 0x0cf70025,
+ 0x0bf7002d, 0x0bf7002b, 0x0bf7002a, 0x0bf70029,
+ 0x0bf70028, 0x0bf70027, 0x0bf70026, 0x0bf70025,
+ 0x0af7002d, 0x0af7002b, 0x0af7002a, 0x0af70029,
+ 0x0af70028, 0x0af70027, 0x0af70026, 0x0af70025,
+ 0x09f7002d, 0x09f7002b, 0x09f7002a, 0x09f70029,
+ 0x09f70028, 0x09f70027, 0x09f70026, 0x09f70025,
+ 0x08f7002d, 0x08f7002b, 0x08f7002a, 0x08f70029,
+ 0x08f70028, 0x08f70027, 0x08f70026, 0x08f70025,
+ 0x07f7002d, 0x07f7002b, 0x07f7002a, 0x07f70029,
+ 0x07f70028, 0x07f70027, 0x07f70026, 0x07f70025,
+ 0x06f7002d, 0x06f7002b, 0x06f7002a, 0x06f70029,
+ 0x06f70028, 0x06f70027, 0x06f70026, 0x06f70025,
+ 0x05f7002d, 0x05f7002b, 0x05f7002a, 0x05f70029,
+ 0x05f70028, 0x05f70027, 0x05f70026, 0x05f70025,
+ 0x04f7002d, 0x04f7002b, 0x04f7002a, 0x04f70029,
+ 0x04f70028, 0x04f70027, 0x04f70026, 0x04f70025,
+ 0x03f7002d, 0x03f7002b, 0x03f7002a, 0x03f70029,
+ 0x03f70028, 0x03f70027, 0x03f70026, 0x03f70025,
+ 0x02f7002d, 0x02f7002b, 0x02f7002a, 0x02f70029,
+ 0x02f70028, 0x02f70027, 0x02f70026, 0x02f70025,
+ 0x01f7002d, 0x01f7002b, 0x01f7002a, 0x01f70029,
+ 0x01f70028, 0x01f70027, 0x01f70026, 0x01f70025,
+ 0x00f7002d, 0x00f7002b, 0x00f7002a, 0x00f70029,
+ 0x00f70028, 0x00f70027, 0x00f70026, 0x00f70025,
+};
+
+const u32 txpwrctrl_tx_gain_ipa_5g[] = {
+ 0x7ff70035, 0x7ff70033, 0x7ff70032, 0x7ff70031,
+ 0x7ff7002f, 0x7ff7002e, 0x7ff7002d, 0x7ff7002b,
+ 0x7ff7002a, 0x7ff70029, 0x7ff70028, 0x7ff70027,
+ 0x7ff70026, 0x7ff70024, 0x7ff70023, 0x7ff70022,
+ 0x7ef70028, 0x7ef70027, 0x7ef70026, 0x7ef70025,
+ 0x7ef70024, 0x7ef70023, 0x7df70028, 0x7df70027,
+ 0x7df70026, 0x7df70025, 0x7df70024, 0x7df70023,
+ 0x7df70022, 0x7cf70029, 0x7cf70028, 0x7cf70027,
+ 0x7cf70026, 0x7cf70025, 0x7cf70023, 0x7cf70022,
+ 0x7bf70029, 0x7bf70028, 0x7bf70026, 0x7bf70025,
+ 0x7bf70024, 0x7bf70023, 0x7bf70022, 0x7bf70021,
+ 0x7af70029, 0x7af70028, 0x7af70027, 0x7af70026,
+ 0x7af70025, 0x7af70024, 0x7af70023, 0x7af70022,
+ 0x79f70029, 0x79f70028, 0x79f70027, 0x79f70026,
+ 0x79f70025, 0x79f70024, 0x79f70023, 0x79f70022,
+ 0x78f70029, 0x78f70028, 0x78f70027, 0x78f70026,
+ 0x78f70025, 0x78f70024, 0x78f70023, 0x78f70022,
+ 0x77f70029, 0x77f70028, 0x77f70027, 0x77f70026,
+ 0x77f70025, 0x77f70024, 0x77f70023, 0x77f70022,
+ 0x76f70029, 0x76f70028, 0x76f70027, 0x76f70026,
+ 0x76f70024, 0x76f70023, 0x76f70022, 0x76f70021,
+ 0x75f70029, 0x75f70028, 0x75f70027, 0x75f70026,
+ 0x75f70025, 0x75f70024, 0x75f70023, 0x74f70029,
+ 0x74f70028, 0x74f70026, 0x74f70025, 0x74f70024,
+ 0x74f70023, 0x74f70022, 0x73f70029, 0x73f70027,
+ 0x73f70026, 0x73f70025, 0x73f70024, 0x73f70023,
+ 0x73f70022, 0x72f70028, 0x72f70027, 0x72f70026,
+ 0x72f70025, 0x72f70024, 0x72f70023, 0x72f70022,
+ 0x71f70028, 0x71f70027, 0x71f70026, 0x71f70025,
+ 0x71f70024, 0x71f70023, 0x70f70028, 0x70f70027,
+ 0x70f70026, 0x70f70024, 0x70f70023, 0x70f70022,
+ 0x70f70021, 0x70f70020, 0x70f70020, 0x70f7001f,
+};
+
+const u16 tbl_iqcal_gainparams[2][9][8] = {
+ {
+ { 0x000, 0, 0, 2, 0x69, 0x69, 0x69, 0x69 },
+ { 0x700, 7, 0, 0, 0x69, 0x69, 0x69, 0x69 },
+ { 0x710, 7, 1, 0, 0x68, 0x68, 0x68, 0x68 },
+ { 0x720, 7, 2, 0, 0x67, 0x67, 0x67, 0x67 },
+ { 0x730, 7, 3, 0, 0x66, 0x66, 0x66, 0x66 },
+ { 0x740, 7, 4, 0, 0x65, 0x65, 0x65, 0x65 },
+ { 0x741, 7, 4, 1, 0x65, 0x65, 0x65, 0x65 },
+ { 0x742, 7, 4, 2, 0x65, 0x65, 0x65, 0x65 },
+ { 0x743, 7, 4, 3, 0x65, 0x65, 0x65, 0x65 }
+ },
+ {
+ { 0x000, 7, 0, 0, 0x79, 0x79, 0x79, 0x79 },
+ { 0x700, 7, 0, 0, 0x79, 0x79, 0x79, 0x79 },
+ { 0x710, 7, 1, 0, 0x79, 0x79, 0x79, 0x79 },
+ { 0x720, 7, 2, 0, 0x78, 0x78, 0x78, 0x78 },
+ { 0x730, 7, 3, 0, 0x78, 0x78, 0x78, 0x78 },
+ { 0x740, 7, 4, 0, 0x78, 0x78, 0x78, 0x78 },
+ { 0x741, 7, 4, 1, 0x78, 0x78, 0x78, 0x78 },
+ { 0x742, 7, 4, 2, 0x78, 0x78, 0x78, 0x78 },
+ { 0x743, 7, 4, 3, 0x78, 0x78, 0x78, 0x78 }
+ }
+};
+
+const struct nphy_txiqcal_ladder ladder_lo[] = {
+ { 3, 0 },
+ { 4, 0 },
+ { 6, 0 },
+ { 9, 0 },
+ { 13, 0 },
+ { 18, 0 },
+ { 25, 0 },
+ { 25, 1 },
+ { 25, 2 },
+ { 25, 3 },
+ { 25, 4 },
+ { 25, 5 },
+ { 25, 6 },
+ { 25, 7 },
+ { 35, 7 },
+ { 50, 7 },
+ { 71, 7 },
+ { 100, 7 }
+};
+
+const struct nphy_txiqcal_ladder ladder_iq[] = {
+ { 3, 0 },
+ { 4, 0 },
+ { 6, 0 },
+ { 9, 0 },
+ { 13, 0 },
+ { 18, 0 },
+ { 25, 0 },
+ { 35, 0 },
+ { 50, 0 },
+ { 71, 0 },
+ { 100, 0 },
+ { 100, 1 },
+ { 100, 2 },
+ { 100, 3 },
+ { 100, 4 },
+ { 100, 5 },
+ { 100, 6 },
+ { 100, 7 }
+};
+
+const u16 loscale[] = {
+ 256, 256, 271, 271,
+ 287, 256, 256, 271,
+ 271, 287, 287, 304,
+ 304, 256, 256, 271,
+ 271, 287, 287, 304,
+ 304, 322, 322, 341,
+ 341, 362, 362, 383,
+ 383, 256, 256, 271,
+ 271, 287, 287, 304,
+ 304, 322, 322, 256,
+ 256, 271, 271, 287,
+ 287, 304, 304, 322,
+ 322, 341, 341, 362,
+ 362, 256, 256, 271,
+ 271, 287, 287, 304,
+ 304, 322, 322, 256,
+ 256, 271, 271, 287,
+ 287, 304, 304, 322,
+ 322, 341, 341, 362,
+ 362, 256, 256, 271,
+ 271, 287, 287, 304,
+ 304, 322, 322, 341,
+ 341, 362, 362, 383,
+ 383, 406, 406, 430,
+ 430, 455, 455, 482,
+ 482, 511, 511, 541,
+ 541, 573, 573, 607,
+ 607, 643, 643, 681,
+ 681, 722, 722, 764,
+ 764, 810, 810, 858,
+ 858, 908, 908, 962,
+ 962, 1019, 1019, 256
+};
+
+const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
+ 0x0200, 0x0300, 0x0400, 0x0700,
+ 0x0900, 0x0c00, 0x1200, 0x1201,
+ 0x1202, 0x1203, 0x1204, 0x1205,
+ 0x1206, 0x1207, 0x1907, 0x2307,
+ 0x3207, 0x4707
+};
+
+const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
+ 0x0300, 0x0500, 0x0700, 0x0900,
+ 0x0d00, 0x1100, 0x1900, 0x1901,
+ 0x1902, 0x1903, 0x1904, 0x1905,
+ 0x1906, 0x1907, 0x2407, 0x3207,
+ 0x4607, 0x6407
+};
+
+const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
+ 0x0100, 0x0200, 0x0400, 0x0700,
+ 0x0900, 0x0c00, 0x1200, 0x1900,
+ 0x2300, 0x3200, 0x4700, 0x4701,
+ 0x4702, 0x4703, 0x4704, 0x4705,
+ 0x4706, 0x4707
+};
+
+const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
+ 0x0200, 0x0300, 0x0600, 0x0900,
+ 0x0d00, 0x1100, 0x1900, 0x2400,
+ 0x3200, 0x4600, 0x6400, 0x6401,
+ 0x6402, 0x6403, 0x6404, 0x6405,
+ 0x6406, 0x6407
+};
+
+const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3] = { };
+
+const u16 tbl_tx_iqlo_cal_startcoefs[B43_NTAB_TX_IQLO_CAL_STARTCOEFS] = { };
+
+const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
+ 0x8423, 0x8323, 0x8073, 0x8256,
+ 0x8045, 0x8223, 0x9423, 0x9323,
+ 0x9073, 0x9256, 0x9045, 0x9223
+};
+
+const u16 tbl_tx_iqlo_cal_cmds_recal[] = {
+ 0x8101, 0x8253, 0x8053, 0x8234,
+ 0x8034, 0x9101, 0x9253, 0x9053,
+ 0x9234, 0x9034
+};
+
+const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
+ 0x8123, 0x8264, 0x8086, 0x8245,
+ 0x8056, 0x9123, 0x9264, 0x9086,
+ 0x9245, 0x9056
+};
+
+const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
+ 0x8434, 0x8334, 0x8084, 0x8267,
+ 0x8056, 0x8234, 0x9434, 0x9334,
+ 0x9084, 0x9267, 0x9056, 0x9234
+};
+
+const s16 tbl_tx_filter_coef_rev4[7][15] = {
+ { -377, 137, -407, 208, -1527,
+ 956, 93, 186, 93, 230,
+ -44, 230, 20, -191, 201 },
+ { -77, 20, -98, 49, -93,
+ 60, 56, 111, 56, 26,
+ -5, 26, 34, -32, 34 },
+ { -360, 164, -376, 164, -1533,
+ 576, 308, -314, 308, 121,
+ -73, 121, 91, 124, 91 },
+ { -295, 200, -363, 142, -1391,
+ 826, 151, 301, 151, 151,
+ 301, 151, 602, -752, 602 },
+ { -92, 58, -96, 49, -104,
+ 44, 17, 35, 17, 12,
+ 25, 12, 13, 27, 13 },
+ { -375, 136, -399, 209, -1479,
+ 949, 130, 260, 130, 230,
+ -44, 230, 201, -191, 201 },
+ { 0xed9, 0xc8, 0xe95, 0x8e, 0xa91,
+ 0x33a, 0x97, 0x12d, 0x97, 0x97,
+ 0x12d, 0x97, 0x25a, 0xd10, 0x25a }
+};
+
+/* addr0, addr1, bmask, shift */
+const struct nphy_rf_control_override_rev2 tbl_rf_control_override_rev2[] = {
+ { 0x78, 0x78, 0x0038, 3 }, /* for field == 0x0002 (fls == 2) */
+ { 0x7A, 0x7D, 0x0001, 0 }, /* for field == 0x0004 (fls == 3) */
+ { 0x7A, 0x7D, 0x0002, 1 }, /* for field == 0x0008 (fls == 4) */
+ { 0x7A, 0x7D, 0x0004, 2 }, /* for field == 0x0010 (fls == 5) */
+ { 0x7A, 0x7D, 0x0030, 4 }, /* for field == 0x0020 (fls == 6) */
+ { 0x7A, 0x7D, 0x00C0, 6 }, /* for field == 0x0040 (fls == 7) */
+ { 0x7A, 0x7D, 0x0100, 8 }, /* for field == 0x0080 (fls == 8) */
+ { 0x7A, 0x7D, 0x0200, 9 }, /* for field == 0x0100 (fls == 9) */
+ { 0x78, 0x78, 0x0004, 2 }, /* for field == 0x0200 (fls == 10) */
+ { 0x7B, 0x7E, 0x01FF, 0 }, /* for field == 0x0400 (fls == 11) */
+ { 0x7C, 0x7F, 0x01FF, 0 }, /* for field == 0x0800 (fls == 12) */
+ { 0x78, 0x78, 0x0100, 8 }, /* for field == 0x1000 (fls == 13) */
+ { 0x78, 0x78, 0x0200, 9 }, /* for field == 0x2000 (fls == 14) */
+ { 0x78, 0x78, 0xF000, 12 } /* for field == 0x4000 (fls == 15) */
+};
+
+/* val_mask, val_shift, en_addr0, val_addr0, en_addr1, val_addr1 */
+const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = {
+ { 0x8000, 15, 0xE5, 0xF9, 0xE6, 0xFB }, /* field == 0x0001 (fls 1) */
+ { 0x0001, 0, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0002 (fls 2) */
+ { 0x0002, 1, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0004 (fls 3) */
+ { 0x0004, 2, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0008 (fls 4) */
+ { 0x0016, 4, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0010 (fls 5) */
+ { 0x0020, 5, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0020 (fls 6) */
+ { 0x0040, 6, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0040 (fls 7) */
+ { 0x0080, 6, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0080 (fls 8) */
+ { 0x0100, 7, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0100 (fls 9) */
+ { 0x0007, 0, 0xE7, 0xF8, 0xEC, 0xFA }, /* field == 0x0200 (fls 10) */
+ { 0x0070, 4, 0xE7, 0xF8, 0xEC, 0xFA }, /* field == 0x0400 (fls 11) */
+ { 0xE000, 13, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0800 (fls 12) */
+ { 0xFFFF, 0, 0xE7, 0x7B, 0xEC, 0x7E }, /* field == 0x1000 (fls 13) */
+ { 0xFFFF, 0, 0xE7, 0x7C, 0xEC, 0x7F }, /* field == 0x2000 (fls 14) */
+ { 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */
+};
+
static inline void assert_ntab_array_sizes(void)
{
#undef check
@@ -2442,6 +2980,72 @@ static inline void assert_ntab_array_sizes(void)
#undef check
}
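+
+/*
+ * Table offsets carry the element width in their upper bits: helpers
+ * like B43_NTAB16() and B43_NTAB32() OR a type code into the offset,
+ * which the accessors below strip off with B43_NTAB_TYPEMASK before
+ * programming B43_NPHY_TABLE_ADDR.
+ */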
+u32 b43_ntab_read(struct b43_wldev *dev, u32 offset)
+{
+ u32 type, value;
+
+ type = offset & B43_NTAB_TYPEMASK;
+ offset &= ~B43_NTAB_TYPEMASK;
+ B43_WARN_ON(offset > 0xFFFF);
+
+ switch (type) {
+ case B43_NTAB_8BIT:
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
+ value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF;
+ break;
+ case B43_NTAB_16BIT:
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
+ value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ break;
+ case B43_NTAB_32BIT:
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
+ value = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
+ value <<= 16;
+ value |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ break;
+ default:
+ B43_WARN_ON(1);
+ value = 0;
+ }
+
+ return value;
+}
+
+void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, void *_data)
+{
+ u32 type;
+ u8 *data = _data;
+ unsigned int i;
+
+ type = offset & B43_NTAB_TYPEMASK;
+ offset &= ~B43_NTAB_TYPEMASK;
+ B43_WARN_ON(offset > 0xFFFF);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
+
+ for (i = 0; i < nr_elements; i++) {
+ switch (type) {
+ case B43_NTAB_8BIT:
+ *data = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF;
+ data++;
+ break;
+ case B43_NTAB_16BIT:
+ *((u16 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ data += 2;
+ break;
+ case B43_NTAB_32BIT:
+ *((u32 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
+ *((u32 *)data) <<= 16;
+ *((u32 *)data) |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ data += 4;
+ break;
+ default:
+ B43_WARN_ON(1);
+ }
+ }
+}
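+
+/*
+ * Typical bulk access, mirroring the calibration code above (a usage
+ * sketch, not a new API):
+ *
+ *	u16 save[2];
+ *	b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, save);
+ *	...
+ *	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, save);
+ */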
+
void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value)
{
u32 type;
@@ -2474,3 +3078,91 @@ void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value)
/* Some compiletime assertions... */
assert_ntab_array_sizes();
}
+
+void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, const void *_data)
+{
+ u32 type, value;
+ const u8 *data = _data;
+ unsigned int i;
+
+ type = offset & B43_NTAB_TYPEMASK;
+ offset &= ~B43_NTAB_TYPEMASK;
+ B43_WARN_ON(offset > 0xFFFF);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
+
+ for (i = 0; i < nr_elements; i++) {
+ switch (type) {
+ case B43_NTAB_8BIT:
+ value = *data;
+ data++;
+ B43_WARN_ON(value & ~0xFF);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value);
+ break;
+ case B43_NTAB_16BIT:
+ value = *((u16 *)data);
+ data += 2;
+ B43_WARN_ON(value & ~0xFFFF);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value);
+ break;
+ case B43_NTAB_32BIT:
+ value = *((u32 *)data);
+ data += 4;
+ b43_phy_write(dev, B43_NPHY_TABLE_DATAHI, value >> 16);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ value & 0xFFFF);
+ break;
+ default:
+ B43_WARN_ON(1);
+ }
+ }
+}
+
+#define ntab_upload(dev, offset, data) do { \
+ unsigned int i; \
+ for (i = 0; i < (offset##_SIZE); i++) \
+ b43_ntab_write(dev, (offset) + i, (data)[i]); \
+ } while (0)
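+/*
+ * ntab_upload() relies on the convention that every table offset macro
+ * has a matching *_SIZE define in tables_nphy.h (for example
+ * B43_NTAB_FRAMESTRUCT / B43_NTAB_FRAMESTRUCT_SIZE).
+ */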
+
+void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
+{
+ /* Static tables */
+ ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct);
+ ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup);
+ ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap);
+ ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
+ ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
+ ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
+ ntab_upload(dev, B43_NTAB_PILOTLT, b43_ntab_pilotlt);
+ ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
+ ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
+ ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
+ ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
+ ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
+ ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
+ ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
+
+ /* Volatile tables */
+ ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
+ ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
+ ntab_upload(dev, B43_NTAB_C0_ESTPLT, b43_ntab_estimatepowerlt0);
+ ntab_upload(dev, B43_NTAB_C1_ESTPLT, b43_ntab_estimatepowerlt1);
+ ntab_upload(dev, B43_NTAB_C0_ADJPLT, b43_ntab_adjustpower0);
+ ntab_upload(dev, B43_NTAB_C1_ADJPLT, b43_ntab_adjustpower1);
+ ntab_upload(dev, B43_NTAB_C0_GAINCTL, b43_ntab_gainctl0);
+ ntab_upload(dev, B43_NTAB_C1_GAINCTL, b43_ntab_gainctl1);
+ ntab_upload(dev, B43_NTAB_C0_IQLT, b43_ntab_iqlt0);
+ ntab_upload(dev, B43_NTAB_C1_IQLT, b43_ntab_iqlt1);
+ ntab_upload(dev, B43_NTAB_C0_LOFEEDTH, b43_ntab_loftlt0);
+ ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
+}
+
+void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
+{
+ /* Static tables */
+ /* TODO */
+
+ /* Volatile tables */
+ /* TODO */
+}
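
The b43_ntab_read()/b43_ntab_write() pair and the ntab_upload() macro above all rely on the table offset carrying the element width in its upper bits (B43_NTAB_TYPEMASK selecting 8-, 16- or 32-bit access, with 32-bit entries split across the DATAHI/DATALO registers). A minimal, self-contained sketch of that dispatch follows; the mask values, the fake_regs[] backing store and the bit layout are illustrative stand-ins, not the driver's real definitions.

/* Standalone sketch (not the driver code): a table "offset" that encodes
 * both the element address and the access width, mirroring the
 * B43_NTAB_8BIT/16BIT/32BIT dispatch above. */
#include <stdint.h>
#include <stdio.h>

#define NTAB_TYPEMASK 0xF0000000u   /* assumed layout: type in the top nibble */
#define NTAB_8BIT     0x10000000u
#define NTAB_16BIT    0x20000000u
#define NTAB_32BIT    0x30000000u

static uint16_t fake_regs[1 << 16]; /* pretend PHY table memory */

static uint32_t ntab_read(uint32_t offset)
{
	uint32_t type = offset & NTAB_TYPEMASK;
	uint32_t addr = offset & ~NTAB_TYPEMASK;

	switch (type) {
	case NTAB_8BIT:
		return fake_regs[addr] & 0xFF;
	case NTAB_16BIT:
		return fake_regs[addr];
	case NTAB_32BIT:
		/* 32-bit entries are split across a HI and a LO half */
		return ((uint32_t)fake_regs[addr + 1] << 16) | fake_regs[addr];
	default:
		return 0;
	}
}

int main(void)
{
	fake_regs[0x6808] = 0xBEEF;
	fake_regs[0x6809] = 0xDEAD;
	printf("0x%08X\n", (unsigned)ntab_read(NTAB_32BIT | 0x6808));
	return 0;
}

Compiled as plain C this prints 0xDEADBEEF, showing how the two 16-bit halves are recombined for a 32-bit table entry.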
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 4d498b053ec7..9c1c6ecd3672 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -46,6 +46,27 @@ struct b43_nphy_channeltab_entry {
struct b43_wldev;
+struct nphy_txiqcal_ladder {
+ u8 percent;
+ u8 g_env;
+};
+
+struct nphy_rf_control_override_rev2 {
+ u8 addr0;
+ u8 addr1;
+ u16 bmask;
+ u8 shift;
+};
+
+struct nphy_rf_control_override_rev3 {
+ u16 val_mask;
+ u8 val_shift;
+ u8 en_addr0;
+ u8 val_addr0;
+ u8 en_addr1;
+ u8 val_addr1;
+};
+
/* Upload the default register value table.
* If "ghz5" is true, we upload the 5Ghz table. Otherwise the 2.4Ghz
* table is uploaded. If "ignore_uploadflag" is true, we upload any value
@@ -126,34 +147,57 @@ b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel);
#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
#define B43_NTAB_C1_LOFEEDTH_SIZE 128
+#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE 18
+#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE 18
+#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_40_SIZE 18
+#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_20_SIZE 18
+#define B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3 11
+#define B43_NTAB_TX_IQLO_CAL_STARTCOEFS 9
+#define B43_NTAB_TX_IQLO_CAL_CMDS_RECAL_REV3 12
+#define B43_NTAB_TX_IQLO_CAL_CMDS_RECAL 10
+#define B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL 10
+#define B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL_REV3 12
+
+u32 b43_ntab_read(struct b43_wldev *dev, u32 offset);
+void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, void *_data);
void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value);
-
-extern const u8 b43_ntab_adjustpower0[];
-extern const u8 b43_ntab_adjustpower1[];
-extern const u16 b43_ntab_bdi[];
-extern const u32 b43_ntab_channelest[];
-extern const u8 b43_ntab_estimatepowerlt0[];
-extern const u8 b43_ntab_estimatepowerlt1[];
-extern const u8 b43_ntab_framelookup[];
-extern const u32 b43_ntab_framestruct[];
-extern const u32 b43_ntab_gainctl0[];
-extern const u32 b43_ntab_gainctl1[];
-extern const u32 b43_ntab_intlevel[];
-extern const u32 b43_ntab_iqlt0[];
-extern const u32 b43_ntab_iqlt1[];
-extern const u16 b43_ntab_loftlt0[];
-extern const u16 b43_ntab_loftlt1[];
-extern const u8 b43_ntab_mcs[];
-extern const u32 b43_ntab_noisevar10[];
-extern const u32 b43_ntab_noisevar11[];
-extern const u16 b43_ntab_pilot[];
-extern const u32 b43_ntab_pilotlt[];
-extern const u32 b43_ntab_tdi20a0[];
-extern const u32 b43_ntab_tdi20a1[];
-extern const u32 b43_ntab_tdi40a0[];
-extern const u32 b43_ntab_tdi40a1[];
-extern const u32 b43_ntab_tdtrn[];
-extern const u32 b43_ntab_tmap[];
-
+void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, const void *_data);
+
+void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev);
+void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev);
+
+extern const u32 b43_ntab_tx_gain_rev0_1_2[];
+extern const u32 b43_ntab_tx_gain_rev3plus_2ghz[];
+extern const u32 b43_ntab_tx_gain_rev3_5ghz[];
+extern const u32 b43_ntab_tx_gain_rev4_5ghz[];
+extern const u32 b43_ntab_tx_gain_rev5plus_5ghz[];
+
+extern const u32 txpwrctrl_tx_gain_ipa[];
+extern const u32 txpwrctrl_tx_gain_ipa_rev5[];
+extern const u32 txpwrctrl_tx_gain_ipa_rev6[];
+extern const u32 txpwrctrl_tx_gain_ipa_5g[];
+extern const u16 tbl_iqcal_gainparams[2][9][8];
+extern const struct nphy_txiqcal_ladder ladder_lo[];
+extern const struct nphy_txiqcal_ladder ladder_iq[];
+extern const u16 loscale[];
+
+extern const u16 tbl_tx_iqlo_cal_loft_ladder_40[];
+extern const u16 tbl_tx_iqlo_cal_loft_ladder_20[];
+extern const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[];
+extern const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[];
+extern const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[];
+extern const u16 tbl_tx_iqlo_cal_startcoefs[];
+extern const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[];
+extern const u16 tbl_tx_iqlo_cal_cmds_recal[];
+extern const u16 tbl_tx_iqlo_cal_cmds_fullcal[];
+extern const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[];
+extern const s16 tbl_tx_filter_coef_rev4[7][15];
+
+extern const struct nphy_rf_control_override_rev2
+ tbl_rf_control_override_rev2[];
+extern const struct nphy_rf_control_override_rev3
+ tbl_rf_control_override_rev3[];
#endif /* B43_TABLES_NPHY_H_ */
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 0a86bdf53154..8b9387c6ff36 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -1411,7 +1411,6 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
b43legacyerr(dev->wl, "DMA tx mapping failure\n");
goto out_unlock;
}
- ring->nr_tx_packets++;
if ((free_slots(ring) < SLOTS_PER_PACKET) ||
should_inject_overflow(ring)) {
/* This TX ring is full. */
@@ -1527,25 +1526,6 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
spin_unlock(&ring->lock);
}
-void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- const int nr_queues = dev->wl->hw->queues;
- struct b43legacy_dmaring *ring;
- unsigned long flags;
- int i;
-
- for (i = 0; i < nr_queues; i++) {
- ring = priority_to_txring(dev, i);
-
- spin_lock_irqsave(&ring->lock, flags);
- stats[i].len = ring->used_slots / SLOTS_PER_PACKET;
- stats[i].limit = ring->nr_slots / SLOTS_PER_PACKET;
- stats[i].count = ring->nr_tx_packets;
- spin_unlock_irqrestore(&ring->lock, flags);
- }
-}
-
static void dma_rx(struct b43legacy_dmaring *ring,
int *slot)
{
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index 2f186003c31e..f9681041c2d8 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -243,8 +243,6 @@ struct b43legacy_dmaring {
int used_slots;
/* Currently used slot in the ring. */
int current_slot;
- /* Total number of packets sent. Statistics only. */
- unsigned int nr_tx_packets;
/* Frameoffset in octets. */
u32 frameoffset;
/* Descriptor buffer size. */
@@ -292,9 +290,6 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev);
void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev);
void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev);
-void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats);
-
int b43legacy_dma_tx(struct b43legacy_wldev *dev,
struct sk_buff *skb);
void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
@@ -315,11 +310,6 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
}
static inline
-void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
-}
-static inline
int b43legacy_dma_tx(struct b43legacy_wldev *dev,
struct sk_buff *skb)
{
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 4a905b6a886b..1d070be5a678 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -61,6 +61,8 @@ MODULE_AUTHOR("Michael Buesch");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(B43legacy_SUPPORTED_FIRMWARE_ID);
+MODULE_FIRMWARE("b43legacy/ucode2.fw");
+MODULE_FIRMWARE("b43legacy/ucode4.fw");
#if defined(CONFIG_B43LEGACY_DMA) && defined(CONFIG_B43LEGACY_PIO)
static int modparam_pio;
@@ -2444,29 +2446,6 @@ static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
return 0;
}
-static int b43legacy_op_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
- struct b43legacy_wldev *dev = wl->current_dev;
- unsigned long flags;
- int err = -ENODEV;
-
- if (!dev)
- goto out;
- spin_lock_irqsave(&wl->irq_lock, flags);
- if (likely(b43legacy_status(dev) >= B43legacy_STAT_STARTED)) {
- if (b43legacy_using_pio(dev))
- b43legacy_pio_get_tx_stats(dev, stats);
- else
- b43legacy_dma_get_tx_stats(dev, stats);
- err = 0;
- }
- spin_unlock_irqrestore(&wl->irq_lock, flags);
-out:
- return err;
-}
-
static int b43legacy_op_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
@@ -2921,6 +2900,7 @@ static int b43legacy_wireless_core_start(struct b43legacy_wldev *dev)
goto out;
}
/* We are ready to run. */
+ ieee80211_wake_queues(dev->wl->hw);
b43legacy_set_status(dev, B43legacy_STAT_STARTED);
/* Start data flow (TX/RX) */
@@ -3341,6 +3321,7 @@ static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev)
b43legacy_security_init(dev);
b43legacy_rng_init(wl);
+ ieee80211_wake_queues(dev->wl->hw);
b43legacy_set_status(dev, B43legacy_STAT_INITIALIZED);
b43legacy_leds_init(dev);
@@ -3361,7 +3342,7 @@ err_kfree_lo_control:
}
static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev;
@@ -3370,23 +3351,23 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
/* TODO: allow WDS/AP devices to coexist */
- if (conf->type != NL80211_IFTYPE_AP &&
- conf->type != NL80211_IFTYPE_STATION &&
- conf->type != NL80211_IFTYPE_WDS &&
- conf->type != NL80211_IFTYPE_ADHOC)
+ if (vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_WDS &&
+ vif->type != NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
mutex_lock(&wl->mutex);
if (wl->operating)
goto out_mutex_unlock;
- b43legacydbg(wl, "Adding Interface type %d\n", conf->type);
+ b43legacydbg(wl, "Adding Interface type %d\n", vif->type);
dev = wl->current_dev;
wl->operating = 1;
- wl->vif = conf->vif;
- wl->if_type = conf->type;
- memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN);
+ wl->vif = vif;
+ wl->if_type = vif->type;
+ memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_adjust_opmode(dev);
@@ -3403,18 +3384,18 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
}
static void b43legacy_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev = wl->current_dev;
unsigned long flags;
- b43legacydbg(wl, "Removing Interface type %d\n", conf->type);
+ b43legacydbg(wl, "Removing Interface type %d\n", vif->type);
mutex_lock(&wl->mutex);
B43legacy_WARN_ON(!wl->operating);
- B43legacy_WARN_ON(wl->vif != conf->vif);
+ B43legacy_WARN_ON(wl->vif != vif);
wl->vif = NULL;
wl->operating = 0;
@@ -3509,7 +3490,6 @@ static const struct ieee80211_ops b43legacy_hw_ops = {
.bss_info_changed = b43legacy_op_bss_info_changed,
.configure_filter = b43legacy_op_configure_filter,
.get_stats = b43legacy_op_get_stats,
- .get_tx_stats = b43legacy_op_get_tx_stats,
.start = b43legacy_op_start,
.stop = b43legacy_op_stop,
.set_tim = b43legacy_op_beacon_set_tim,
@@ -3960,7 +3940,7 @@ static struct ssb_driver b43legacy_ssb_driver = {
static void b43legacy_print_driverinfo(void)
{
- const char *feat_pci = "", *feat_leds = "", *feat_rfkill = "",
+ const char *feat_pci = "", *feat_leds = "",
*feat_pio = "", *feat_dma = "";
#ifdef CONFIG_B43LEGACY_PCI_AUTOSELECT
@@ -3969,9 +3949,6 @@ static void b43legacy_print_driverinfo(void)
#ifdef CONFIG_B43LEGACY_LEDS
feat_leds = "L";
#endif
-#ifdef CONFIG_B43LEGACY_RFKILL
- feat_rfkill = "R";
-#endif
#ifdef CONFIG_B43LEGACY_PIO
feat_pio = "I";
#endif
@@ -3979,9 +3956,9 @@ static void b43legacy_print_driverinfo(void)
feat_dma = "D";
#endif
printk(KERN_INFO "Broadcom 43xx-legacy driver loaded "
- "[ Features: %s%s%s%s%s, Firmware-ID: "
+ "[ Features: %s%s%s%s, Firmware-ID: "
B43legacy_SUPPORTED_FIRMWARE_ID " ]\n",
- feat_pci, feat_leds, feat_rfkill, feat_pio, feat_dma);
+ feat_pci, feat_leds, feat_pio, feat_dma);
}
static int __init b43legacy_init(void)
diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c
index 51866c9a2769..017c0e9c37ef 100644
--- a/drivers/net/wireless/b43legacy/pio.c
+++ b/drivers/net/wireless/b43legacy/pio.c
@@ -477,7 +477,6 @@ int b43legacy_pio_tx(struct b43legacy_wldev *dev,
list_move_tail(&packet->list, &queue->txqueue);
queue->nr_txfree--;
- queue->nr_tx_packets++;
B43legacy_WARN_ON(queue->nr_txfree >= B43legacy_PIO_MAXTXPACKETS);
tasklet_schedule(&queue->txtask);
@@ -546,18 +545,6 @@ void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
tasklet_schedule(&queue->txtask);
}
-void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct b43legacy_pio *pio = &dev->pio;
- struct b43legacy_pioqueue *queue;
-
- queue = pio->queue1;
- stats[0].len = B43legacy_PIO_MAXTXPACKETS - queue->nr_txfree;
- stats[0].limit = B43legacy_PIO_MAXTXPACKETS;
- stats[0].count = queue->nr_tx_packets;
-}
-
static void pio_rx_error(struct b43legacy_pioqueue *queue,
int clear_buffers,
const char *error)
diff --git a/drivers/net/wireless/b43legacy/pio.h b/drivers/net/wireless/b43legacy/pio.h
index 464fec05a06d..8e6773ea6e75 100644
--- a/drivers/net/wireless/b43legacy/pio.h
+++ b/drivers/net/wireless/b43legacy/pio.h
@@ -74,10 +74,6 @@ struct b43legacy_pioqueue {
* posted to the device. We are waiting for the txstatus.
*/
struct list_head txrunning;
- /* Total number or packets sent.
- * (This counter can obviously wrap).
- */
- unsigned int nr_tx_packets;
struct tasklet_struct txtask;
struct b43legacy_pio_txpacket
tx_packets_cache[B43legacy_PIO_MAXTXPACKETS];
@@ -106,8 +102,6 @@ int b43legacy_pio_tx(struct b43legacy_wldev *dev,
struct sk_buff *skb);
void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
const struct b43legacy_txstatus *status);
-void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats);
void b43legacy_pio_rx(struct b43legacy_pioqueue *queue);
/* Suspend TX queue in hardware. */
@@ -140,11 +134,6 @@ void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
{
}
static inline
-void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
-}
-static inline
void b43legacy_pio_rx(struct b43legacy_pioqueue *queue)
{
}
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index ff9b5c882184..d70732819423 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -2618,6 +2618,15 @@ static irqreturn_t prism2_interrupt(int irq, void *dev_id)
int events = 0;
u16 ev;
+ /* Detect early interrupt before driver is fully configured */
+ if (!dev->base_addr) {
+ if (net_ratelimit()) {
+ printk(KERN_DEBUG "%s: Interrupt, but dev not configured\n",
+ dev->name);
+ }
+ return IRQ_HANDLED;
+ }
+
iface = netdev_priv(dev);
local = iface->local;
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index 8fdd41f4b4f2..4d97ae37499b 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -39,7 +39,7 @@ struct hostap_pci_priv {
/* FIX: do we need mb/wmb/rmb with memory operations? */
-static struct pci_device_id prism2_pci_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(prism2_pci_id_table) = {
/* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */
{ 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID },
/* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index 0e5d51086a44..fc04ccdc5bef 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -60,7 +60,7 @@ struct hostap_plx_priv {
#define PLXDEV(vendor,dev,str) { vendor, dev, PCI_ANY_ID, PCI_ANY_ID }
-static struct pci_device_id prism2_plx_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(prism2_plx_id_table) = {
PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"),
PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"),
PLXDEV(0x126c, 0x8030, "Nortel emobility"),
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 56afcf041f81..9b72c45a7748 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -6585,7 +6585,7 @@ static void ipw2100_shutdown(struct pci_dev *pci_dev)
#define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x }
-static struct pci_device_id ipw2100_pci_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ipw2100_pci_id_table) = {
IPW2100_DEV_ID(0x2520), /* IN 2100A mPCI 3A */
IPW2100_DEV_ID(0x2521), /* IN 2100A mPCI 3B */
IPW2100_DEV_ID(0x2524), /* IN 2100A mPCI 3B */
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 09ddd3e6bedc..63c2a7ade5fb 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11524,7 +11524,7 @@ out:
}
/* PCI driver stuff */
-static struct pci_device_id card_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
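
The hostap and ipw2x00 hunks above all make the same mechanical change: the PCI ID tables move from "static struct pci_device_id ...[] __devinitdata" to "static DEFINE_PCI_DEVICE_TABLE(...)", which makes the table const. The sketch below is a self-contained stand-in (trimmed struct, no-op section attribute, and a macro mirroring what the kernel macro roughly expanded to at the time); the kernel's <linux/pci.h> of that era is the authoritative definition.

/* Self-contained sketch of what the DEFINE_PCI_DEVICE_TABLE() conversion
 * buys: the ID table becomes const.  The types and attributes here are
 * stand-ins for <linux/pci.h> and <linux/init.h>. */
#include <stdint.h>
#include <stdio.h>

struct pci_device_id {              /* trimmed-down stand-in */
	uint32_t vendor, device;
	uint32_t subvendor, subdevice;
};

#define __devinitconst              /* section attribute elided in this sketch */
#define PCI_ANY_ID (~0u)
#define DEFINE_PCI_DEVICE_TABLE(_table) \
	const struct pci_device_id _table[] __devinitconst

/* before: static struct pci_device_id prism2_pci_id_table[] __devinitdata = { ... };
 * after: */
static DEFINE_PCI_DEVICE_TABLE(prism2_pci_id_table) = {
	{ 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID },
	{ 0 }
};

int main(void)
{
	printf("first entry: %04x:%04x\n",
	       (unsigned)prism2_pci_id_table[0].vendor,
	       (unsigned)prism2_pci_id_table[0].device);
	return 0;
}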
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index b16b06c2031f..dc8ed1527666 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,14 +1,8 @@
config IWLWIFI
tristate "Intel Wireless Wifi"
- depends on PCI && MAC80211 && EXPERIMENTAL
+ depends on PCI && MAC80211
select FW_LOADER
-config IWLWIFI_SPECTRUM_MEASUREMENT
- bool "Enable Spectrum Measurement in iwlagn driver"
- depends on IWLWIFI
- ---help---
- This option will enable spectrum measurement for the iwlagn driver.
-
config IWLWIFI_DEBUG
bool "Enable full debugging output in iwlagn and iwl3945 drivers"
depends on IWLWIFI
@@ -120,9 +114,3 @@ config IWL3945
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/kbuild/modules.txt>. The
module will be called iwl3945.
-
-config IWL3945_SPECTRUM_MEASUREMENT
- bool "Enable Spectrum Measurement in iwl3945 driver"
- depends on IWL3945
- ---help---
- This option will enable spectrum measurement for the iwl3945 driver.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 7f82044af242..4e378faee650 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -3,7 +3,6 @@ iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o iwl-calib.o
iwlcore-objs += iwl-scan.o iwl-led.o
iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
-iwlcore-$(CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT) += iwl-spectrum.o
iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
CFLAGS_iwl-devtrace.o := -I$(src)
@@ -20,3 +19,5 @@ iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
# 3945
obj-$(CONFIG_IWL3945) += iwl3945.o
iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
+
+ccflags-y += -D__CHECK_ENDIAN__
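
The new ccflags-y line turns on sparse's endianness checking for every object in this directory: fields annotated __le32/__be32 become restricted bitwise types, so they must pass through le32_to_cpu()/cpu_to_le32() and friends rather than being used as plain integers. The plain-C sketch below shows the conversion that annotation enforces; le32_to_host() is a stand-in for the kernel's le32_to_cpu(), not part of iwlwifi.

/* Plain-C sketch of the conversion __CHECK_ENDIAN__ makes sparse enforce:
 * a little-endian firmware/device field must be converted to host order
 * before use. */
#include <stdint.h>
#include <stdio.h>

static uint32_t le32_to_host(const uint8_t b[4])
{
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	/* 0x12345678 as it would appear in a little-endian firmware blob */
	const uint8_t wire[4] = { 0x78, 0x56, 0x34, 0x12 };

	printf("host value: 0x%08X\n", (unsigned)le32_to_host(wire));
	return 0;
}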
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 8414178bcff4..694ceef88590 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008-2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -89,8 +89,78 @@ static void iwl1000_nic_config(struct iwl_priv *priv)
~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
}
+static struct iwl_sensitivity_ranges iwl1000_sensitivity = {
+ .min_nrg_cck = 95,
+ .max_nrg_cck = 0, /* not used, set to 0 */
+ .auto_corr_min_ofdm = 90,
+ .auto_corr_min_ofdm_mrc = 170,
+ .auto_corr_min_ofdm_x1 = 120,
+ .auto_corr_min_ofdm_mrc_x1 = 240,
+
+ .auto_corr_max_ofdm = 120,
+ .auto_corr_max_ofdm_mrc = 210,
+ .auto_corr_max_ofdm_x1 = 155,
+ .auto_corr_max_ofdm_mrc_x1 = 290,
+
+ .auto_corr_min_cck = 125,
+ .auto_corr_max_cck = 200,
+ .auto_corr_min_cck_mrc = 170,
+ .auto_corr_max_cck_mrc = 400,
+ .nrg_th_cck = 95,
+ .nrg_th_ofdm = 95,
+
+ .barker_corr_th_min = 190,
+ .barker_corr_th_min_mrc = 390,
+ .nrg_th_cca = 62,
+};
+
+static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
+{
+ if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+ priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
+ priv->cfg->num_of_queues =
+ priv->cfg->mod_params->num_of_queues;
+
+ priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
+ priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
+ priv->hw_params.scd_bc_tbls_size =
+ priv->cfg->num_of_queues *
+ sizeof(struct iwl5000_scd_bc_tbl);
+ priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
+ priv->hw_params.max_stations = IWL5000_STATION_COUNT;
+ priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
+
+ priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
+ priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
+
+ priv->hw_params.max_bsm_size = 0;
+ priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ BIT(IEEE80211_BAND_5GHZ);
+ priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
+
+ priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+ priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
+ priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+
+ if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
+ priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
+
+ /* Set initial sensitivity parameters */
+ /* Set initial calibration set */
+ priv->hw_params.sens = &iwl1000_sensitivity;
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_TX_IQ_PERD) |
+ BIT(IWL_CALIB_BASE_BAND);
+
+ return 0;
+}
+
static struct iwl_lib_ops iwl1000_lib = {
- .set_hw_params = iwl5000_hw_set_hw_params,
+ .set_hw_params = iwl1000_hw_set_hw_params,
.txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
.txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
.txq_set_sched = iwl5000_txq_set_sched,
@@ -105,6 +175,8 @@ static struct iwl_lib_ops iwl1000_lib = {
.load_ucode = iwl5000_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
+ .dump_fh = iwl_dump_fh,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
.send_tx_power = iwl5000_send_tx_power,
@@ -138,9 +210,10 @@ static struct iwl_lib_ops iwl1000_lib = {
.temperature = iwl5000_temperature,
.set_ct_kill = iwl1000_set_ct_threshold,
},
+ .add_bcast_station = iwl_add_bcast_station,
};
-static struct iwl_ops iwl1000_ops = {
+static const struct iwl_ops iwl1000_ops = {
.ucode = &iwl5000_ucode,
.lib = &iwl1000_lib,
.hcmd = &iwl5000_hcmd,
@@ -173,7 +246,8 @@ struct iwl_cfg iwl1000_bgn_cfg = {
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.support_ct_kill_exit = true,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl1000_bg_cfg = {
@@ -200,6 +274,8 @@ struct iwl_cfg iwl1000_bg_cfg = {
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
index 08ce259a0e60..042f6bc0df13 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 6fd10d443ba3..3a876a8ece38 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
index a871d09d598f..abe2b739c4dc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
index 5a1033ca7aaa..ce990adc51e7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index d4b49883b30e..47909f94271e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 234891d8cc10..6940f086823c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -1951,11 +1951,7 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
}
/* Add the broadcast address so we can send broadcast frames */
- if (iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL) ==
- IWL_INVALID_STATION) {
- IWL_ERR(priv, "Error adding BROADCAST address for transmit.\n");
- return -EIO;
- }
+ priv->cfg->ops->lib->add_bcast_station(priv);
/* If we have set the ASSOC_MSK and we are in BSS mode then
* add the IWL_AP_ID to the station rate table */
@@ -2796,6 +2792,7 @@ static struct iwl_lib_ops iwl3945_lib = {
.post_associate = iwl3945_post_associate,
.isr = iwl_isr_legacy,
.config_ap = iwl3945_config_ap,
+ .add_bcast_station = iwl3945_add_bcast_station,
};
static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
@@ -2804,7 +2801,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
.rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
};
-static struct iwl_ops iwl3945_ops = {
+static const struct iwl_ops iwl3945_ops = {
.ucode = &iwl3945_ucode,
.lib = &iwl3945_lib,
.hcmd = &iwl3945_hcmd,
@@ -2830,6 +2827,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
.ht_greenfield_support = false,
.led_compensation = 64,
.broken_powersave = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
};
static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2847,9 +2845,10 @@ static struct iwl_cfg iwl3945_abg_cfg = {
.ht_greenfield_support = false,
.led_compensation = 64,
.broken_powersave = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
};
-struct pci_device_id iwl3945_hw_card_ids[] = {
+DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
{IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
{IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
{IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 531fa125f5a6..8f553f36d270 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -37,7 +37,7 @@
#include <net/ieee80211_radiotap.h>
/* Hardware specific file defines the PCI IDs table for that hardware module */
-extern struct pci_device_id iwl3945_hw_card_ids[];
+extern const struct pci_device_id iwl3945_hw_card_ids[];
#include "iwl-csr.h"
#include "iwl-prph.h"
@@ -226,7 +226,8 @@ extern void iwl3945_rx_replenish(void *data);
extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,int left);
-extern void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log);
+extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+ char **buf, bool display);
extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index c606366b582c..67ef562e8db1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 9b4b8b5c7574..aebe8c51d3e1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -2206,9 +2206,10 @@ static struct iwl_lib_ops iwl4965_lib = {
.temperature = iwl4965_temperature_calib,
.set_ct_kill = iwl4965_set_ct_threshold,
},
+ .add_bcast_station = iwl_add_bcast_station,
};
-static struct iwl_ops iwl4965_ops = {
+static const struct iwl_ops iwl4965_ops = {
.ucode = &iwl4965_ucode,
.lib = &iwl4965_lib,
.hcmd = &iwl4965_hcmd,
@@ -2239,7 +2240,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
.broken_powersave = true,
.led_compensation = 61,
.chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
};
/* Module firmware */
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index bc056e9ab85f..714e032f6217 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index de45f308b744..f3d662c8cbcf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -179,14 +179,24 @@ static void iwl5000_gain_computation(struct iwl_priv *priv,
data->delta_gain_code[i] = 0;
continue;
}
- delta_g = (1000 * ((s32)average_noise[default_chain] -
+
+ delta_g = (priv->cfg->chain_noise_scale *
+ ((s32)average_noise[default_chain] -
(s32)average_noise[i])) / 1500;
+
/* bound gain by 2 bits value max, 3rd bit is sign */
data->delta_gain_code[i] =
min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
if (delta_g < 0)
- /* set negative sign */
+ /*
+ * set negative sign ...
+ * note to Intel developers: This is uCode API format,
+ * not the format of any internal device registers.
+ * Do not change this format for e.g. 6050 or similar
+ * devices. Change format only if more resolution
+ * (i.e. more than 2 bits magnitude) is needed.
+ */
data->delta_gain_code[i] |= (1 << 2);
}
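
The reworked gain loop scales the per-chain noise delta by the new per-device chain_noise_scale and then packs the result in the sign/magnitude format the comment insists on: a 2-bit magnitude clamped to CHAIN_NOISE_MAX_DELTA_GAIN_CODE, with the sign carried in bit 2. A standalone sketch of just that packing follows; the input numbers and the constant's value are assumptions for illustration.

/* Standalone sketch of the uCode delta-gain packing: bits 0-1 hold the
 * clamped magnitude, bit 2 flags a negative delta. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3  /* 2-bit bound; value assumed here */

static uint8_t pack_delta_gain(long delta_g)
{
	long mag = labs(delta_g);
	uint8_t code;

	if (mag > CHAIN_NOISE_MAX_DELTA_GAIN_CODE)
		mag = CHAIN_NOISE_MAX_DELTA_GAIN_CODE;
	code = (uint8_t)mag;
	if (delta_g < 0)
		code |= 1 << 2;   /* negative sign, uCode API format */
	return code;
}

int main(void)
{
	/* e.g. default chain noise 120, chain i noise 125, scale 1000 */
	long delta_g = (1000 * (120 - 125)) / 1500;   /* = -3 */

	printf("delta_g=%ld code=0x%X\n", delta_g, pack_delta_gain(delta_g));
	return 0;
}

With these example inputs it prints delta_g=-3 code=0x7, i.e. magnitude 3 with the negative flag set.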
@@ -263,8 +273,8 @@ static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
.auto_corr_max_ofdm = 120,
.auto_corr_max_ofdm_mrc = 210,
- .auto_corr_max_ofdm_x1 = 155,
- .auto_corr_max_ofdm_mrc_x1 = 290,
+ .auto_corr_max_ofdm_x1 = 120,
+ .auto_corr_max_ofdm_mrc_x1 = 240,
.auto_corr_min_cck = 125,
.auto_corr_max_cck = 200,
@@ -412,12 +422,14 @@ static void iwl5000_rx_calib_complete(struct iwl_priv *priv,
/*
* ucode
*/
-static int iwl5000_load_section(struct iwl_priv *priv,
- struct fw_desc *image,
- u32 dst_addr)
+static int iwl5000_load_section(struct iwl_priv *priv, const char *name,
+ struct fw_desc *image, u32 dst_addr)
{
dma_addr_t phy_addr = image->p_addr;
u32 byte_cnt = image->len;
+ int ret;
+
+ priv->ucode_write_complete = 0;
iwl_write_direct32(priv,
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
@@ -447,57 +459,36 @@ static int iwl5000_load_section(struct iwl_priv *priv,
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
- return 0;
-}
-
-static int iwl5000_load_given_ucode(struct iwl_priv *priv,
- struct fw_desc *inst_image,
- struct fw_desc *data_image)
-{
- int ret = 0;
-
- ret = iwl5000_load_section(priv, inst_image,
- IWL50_RTC_INST_LOWER_BOUND);
- if (ret)
- return ret;
-
- IWL_DEBUG_INFO(priv, "INST uCode section being loaded...\n");
+ IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
ret = wait_event_interruptible_timeout(priv->wait_command_queue,
priv->ucode_write_complete, 5 * HZ);
if (ret == -ERESTARTSYS) {
- IWL_ERR(priv, "Could not load the INST uCode section due "
- "to interrupt\n");
+ IWL_ERR(priv, "Could not load the %s uCode section due "
+ "to interrupt\n", name);
return ret;
}
if (!ret) {
- IWL_ERR(priv, "Could not load the INST uCode section\n");
+ IWL_ERR(priv, "Could not load the %s uCode section\n",
+ name);
return -ETIMEDOUT;
}
- priv->ucode_write_complete = 0;
-
- ret = iwl5000_load_section(
- priv, data_image, IWL50_RTC_DATA_LOWER_BOUND);
- if (ret)
- return ret;
+ return 0;
+}
- IWL_DEBUG_INFO(priv, "DATA uCode section being loaded...\n");
+static int iwl5000_load_given_ucode(struct iwl_priv *priv,
+ struct fw_desc *inst_image,
+ struct fw_desc *data_image)
+{
+ int ret = 0;
- ret = wait_event_interruptible_timeout(priv->wait_command_queue,
- priv->ucode_write_complete, 5 * HZ);
- if (ret == -ERESTARTSYS) {
- IWL_ERR(priv, "Could not load the INST uCode section due "
- "to interrupt\n");
+ ret = iwl5000_load_section(priv, "INST", inst_image,
+ IWL50_RTC_INST_LOWER_BOUND);
+ if (ret)
return ret;
- } else if (!ret) {
- IWL_ERR(priv, "Could not load the DATA uCode section\n");
- return -ETIMEDOUT;
- } else
- ret = 0;
-
- priv->ucode_write_complete = 0;
- return ret;
+ return iwl5000_load_section(priv, "DATA", data_image,
+ IWL50_RTC_DATA_LOWER_BOUND);
}
int iwl5000_load_ucode(struct iwl_priv *priv)
@@ -781,7 +772,7 @@ void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
- if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
scd_bc_tbl[txq_id].
tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
@@ -800,12 +791,12 @@ void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
if (txq_id != IWL_CMD_QUEUE_NUM)
sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
- bc_ent = cpu_to_le16(1 | (sta_id << 12));
+ bc_ent = cpu_to_le16(1 | (sta_id << 12));
scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
- if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+ tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}
static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
@@ -1466,6 +1457,8 @@ struct iwl_lib_ops iwl5000_lib = {
.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
+ .dump_fh = iwl_dump_fh,
.load_ucode = iwl5000_load_ucode,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
@@ -1501,6 +1494,7 @@ struct iwl_lib_ops iwl5000_lib = {
.temperature = iwl5000_temperature,
.set_ct_kill = iwl5000_set_ct_threshold,
},
+ .add_bcast_station = iwl_add_bcast_station,
};
static struct iwl_lib_ops iwl5150_lib = {
@@ -1518,6 +1512,7 @@ static struct iwl_lib_ops iwl5150_lib = {
.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
.load_ucode = iwl5000_load_ucode,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
@@ -1553,9 +1548,10 @@ static struct iwl_lib_ops iwl5150_lib = {
.temperature = iwl5150_temperature,
.set_ct_kill = iwl5150_set_ct_threshold,
},
+ .add_bcast_station = iwl_add_bcast_station,
};
-static struct iwl_ops iwl5000_ops = {
+static const struct iwl_ops iwl5000_ops = {
.ucode = &iwl5000_ucode,
.lib = &iwl5000_lib,
.hcmd = &iwl5000_hcmd,
@@ -1563,7 +1559,7 @@ static struct iwl_ops iwl5000_ops = {
.led = &iwlagn_led_ops,
};
-static struct iwl_ops iwl5150_ops = {
+static const struct iwl_ops iwl5150_ops = {
.ucode = &iwl5000_ucode,
.lib = &iwl5150_lib,
.hcmd = &iwl5000_hcmd,
@@ -1600,7 +1596,8 @@ struct iwl_cfg iwl5300_agn_cfg = {
.led_compensation = 51,
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5100_bgn_cfg = {
@@ -1625,6 +1622,8 @@ struct iwl_cfg iwl5100_bgn_cfg = {
.led_compensation = 51,
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5100_abg_cfg = {
@@ -1647,6 +1646,8 @@ struct iwl_cfg iwl5100_abg_cfg = {
.use_bsm = false,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5100_agn_cfg = {
@@ -1671,7 +1672,8 @@ struct iwl_cfg iwl5100_agn_cfg = {
.led_compensation = 51,
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5350_agn_cfg = {
@@ -1696,7 +1698,8 @@ struct iwl_cfg iwl5350_agn_cfg = {
.led_compensation = 51,
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5150_agn_cfg = {
@@ -1721,7 +1724,8 @@ struct iwl_cfg iwl5150_agn_cfg = {
.led_compensation = 51,
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5150_abg_cfg = {
@@ -1744,6 +1748,8 @@ struct iwl_cfg iwl5150_abg_cfg = {
.use_bsm = false,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
index 90185777d98b..ddba39999997 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 74e571049273..782e23a26984 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008-2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -108,7 +108,7 @@ static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
.auto_corr_max_ofdm = 145,
.auto_corr_max_ofdm_mrc = 232,
- .auto_corr_max_ofdm_x1 = 145,
+ .auto_corr_max_ofdm_x1 = 110,
.auto_corr_max_ofdm_mrc_x1 = 232,
.auto_corr_min_cck = 125,
@@ -158,11 +158,25 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
/* Set initial sensitivity parameters */
/* Set initial calibration set */
priv->hw_params.sens = &iwl6000_sensitivity;
- priv->hw_params.calib_init_cfg =
+ switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
+ case CSR_HW_REV_TYPE_6x50:
+ priv->hw_params.calib_init_cfg =
BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_DC) |
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
+
+ break;
+ default:
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_BASE_BAND);
+ break;
+ }
+
return 0;
}
@@ -215,6 +229,8 @@ static struct iwl_lib_ops iwl6000_lib = {
.load_ucode = iwl5000_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
+ .dump_fh = iwl_dump_fh,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
.send_tx_power = iwl5000_send_tx_power,
@@ -250,9 +266,10 @@ static struct iwl_lib_ops iwl6000_lib = {
.temperature = iwl5000_temperature,
.set_ct_kill = iwl6000_set_ct_threshold,
},
+ .add_bcast_station = iwl_add_bcast_station,
};
-static struct iwl_ops iwl6000_ops = {
+static const struct iwl_ops iwl6000_ops = {
.ucode = &iwl5000_ucode,
.lib = &iwl6000_lib,
.hcmd = &iwl5000_hcmd,
@@ -260,21 +277,6 @@ static struct iwl_ops iwl6000_ops = {
.led = &iwlagn_led_ops,
};
-static struct iwl_hcmd_utils_ops iwl6050_hcmd_utils = {
- .get_hcmd_size = iwl5000_get_hcmd_size,
- .build_addsta_hcmd = iwl5000_build_addsta_hcmd,
- .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag,
- .calc_rssi = iwl5000_calc_rssi,
-};
-
-static struct iwl_ops iwl6050_ops = {
- .ucode = &iwl5000_ucode,
- .lib = &iwl6000_lib,
- .hcmd = &iwl5000_hcmd,
- .utils = &iwl6050_hcmd_utils,
- .led = &iwlagn_led_ops,
-};
-
/*
* "i": Internal configuration, use internal Power Amplifier
*/
@@ -306,7 +308,8 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl6000i_2abg_cfg = {
@@ -336,6 +339,8 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl6000i_2bg_cfg = {
@@ -365,6 +370,8 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl6050_2agn_cfg = {
@@ -373,7 +380,7 @@ struct iwl_cfg iwl6050_2agn_cfg = {
.ucode_api_max = IWL6050_UCODE_API_MAX,
.ucode_api_min = IWL6050_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
- .ops = &iwl6050_ops,
+ .ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
@@ -395,7 +402,8 @@ struct iwl_cfg iwl6050_2agn_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DYNAMIC,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1500,
};
struct iwl_cfg iwl6050_2abg_cfg = {
@@ -404,7 +412,7 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.ucode_api_max = IWL6050_UCODE_API_MAX,
.ucode_api_min = IWL6050_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G,
- .ops = &iwl6050_ops,
+ .ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
@@ -425,6 +433,8 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1500,
};
struct iwl_cfg iwl6000_3agn_cfg = {
@@ -455,7 +465,8 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.c b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
index 3bccba20f6da..1a24946bc203 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.h b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
index ab55f92a161d..a594e4fdc6b8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index b93e49158196..6aebcedaca8d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index affc0c5a2f2c..e71923961e69 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -191,7 +191,7 @@ enum {
IWL_RATE_2M_MASK)
#define IWL_CCK_RATES_MASK \
- (IWL_BASIC_RATES_MASK | \
+ (IWL_CCK_BASIC_RATES_MASK | \
IWL_RATE_5M_MASK | \
IWL_RATE_11M_MASK)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 1c9866daf815..1854c720b5e0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -73,13 +73,7 @@
#define VD
#endif
-#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
-#define VS "s"
-#else
-#define VS
-#endif
-
-#define DRV_VERSION IWLWIFI_VERSION VD VS
+#define DRV_VERSION IWLWIFI_VERSION VD
MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -203,7 +197,8 @@ int iwl_commit_rxon(struct iwl_priv *priv)
priv->start_calib = 0;
/* Add the broadcast address so we can send broadcast frames */
- iwl_add_bcast_station(priv);
+ priv->cfg->ops->lib->add_bcast_station(priv);
+
/* If we have set the ASSOC_MSK and we are in BSS mode then
* add the IWL_AP_ID to the station rate table */
@@ -657,6 +652,131 @@ static void iwl_bg_statistics_periodic(unsigned long data)
iwl_send_statistics_request(priv, CMD_ASYNC, false);
}
+
+static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
+ u32 start_idx, u32 num_events,
+ u32 mode)
+{
+ u32 i;
+ u32 ptr; /* SRAM byte address of log data */
+ u32 ev, time, data; /* event log data */
+ unsigned long reg_flags;
+
+ if (mode == 0)
+ ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
+ else
+ ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
+
+ /* Make sure device is powered up for SRAM reads */
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ if (iwl_grab_nic_access(priv)) {
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return;
+ }
+
+ /* Set starting address; reads will auto-increment */
+ _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
+ rmb();
+
+ /*
+ * "time" is actually "data" for mode 0 (no timestamp).
+ * Place the event id # at far right for easier visual parsing.
+ */
+ for (i = 0; i < num_events; i++) {
+ ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ if (mode == 0) {
+ trace_iwlwifi_dev_ucode_cont_event(priv,
+ 0, time, ev);
+ } else {
+ data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ trace_iwlwifi_dev_ucode_cont_event(priv,
+ time, data, ev);
+ }
+ }
+ /* Allow device to power down */
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static void iwl_continuous_event_trace(struct iwl_priv *priv)
+{
+ u32 capacity; /* event log capacity in # entries */
+ u32 base; /* SRAM byte address of event log header */
+ u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
+ u32 num_wraps; /* # times uCode wrapped to top of log */
+ u32 next_entry; /* index of next entry to be written by uCode */
+
+ if (priv->ucode_type == UCODE_INIT)
+ base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
+ else
+ base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+ if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+ capacity = iwl_read_targ_mem(priv, base);
+ num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
+ mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
+ next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
+ } else
+ return;
+
+ if (num_wraps == priv->event_log.num_wraps) {
+ iwl_print_cont_event_trace(priv,
+ base, priv->event_log.next_entry,
+ next_entry - priv->event_log.next_entry,
+ mode);
+ priv->event_log.non_wraps_count++;
+ } else {
+ if ((num_wraps - priv->event_log.num_wraps) > 1)
+ priv->event_log.wraps_more_count++;
+ else
+ priv->event_log.wraps_once_count++;
+ trace_iwlwifi_dev_ucode_wrap_event(priv,
+ num_wraps - priv->event_log.num_wraps,
+ next_entry, priv->event_log.next_entry);
+ if (next_entry < priv->event_log.next_entry) {
+ iwl_print_cont_event_trace(priv, base,
+ priv->event_log.next_entry,
+ capacity - priv->event_log.next_entry,
+ mode);
+
+ iwl_print_cont_event_trace(priv, base, 0,
+ next_entry, mode);
+ } else {
+ iwl_print_cont_event_trace(priv, base,
+ next_entry, capacity - next_entry,
+ mode);
+
+ iwl_print_cont_event_trace(priv, base, 0,
+ next_entry, mode);
+ }
+ }
+ priv->event_log.num_wraps = num_wraps;
+ priv->event_log.next_entry = next_entry;
+}
+
+/**
+ * iwl_bg_ucode_trace - Timer callback to log ucode event
+ *
+ * The timer is continually re-armed to fire every UCODE_TRACE_PERIOD
+ * milliseconds after the previous expiry; this function performs the
+ * continuous uCode event logging operation, if enabled.
+ */
+static void iwl_bg_ucode_trace(unsigned long data)
+{
+ struct iwl_priv *priv = (struct iwl_priv *)data;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ if (priv->event_log.ucode_trace) {
+ iwl_continuous_event_trace(priv);
+ /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
+ mod_timer(&priv->ucode_trace,
+ jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
+ }
+}
+
static void iwl_rx_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
@@ -689,12 +809,14 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
unsigned long status = priv->status;
- IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s\n",
+ IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
(flags & HW_CARD_DISABLED) ? "Kill" : "On",
- (flags & SW_CARD_DISABLED) ? "Kill" : "On");
+ (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & CT_CARD_DISABLED) ?
+ "Reached" : "Not reached");
if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
- RF_CARD_DISABLED)) {
+ CT_CARD_DISABLED)) {
iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
@@ -708,10 +830,10 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
iwl_write_direct32(priv, HBUS_TARG_MBX_C,
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
}
- if (flags & RF_CARD_DISABLED)
+ if (flags & CT_CARD_DISABLED)
iwl_tt_enter_ct_kill(priv);
}
- if (!(flags & RF_CARD_DISABLED))
+ if (!(flags & CT_CARD_DISABLED))
iwl_tt_exit_ct_kill(priv);
if (flags & HW_CARD_DISABLED)
@@ -761,6 +883,8 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
+ priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
+ iwl_rx_spectrum_measure_notif;
priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
iwl_rx_pm_debug_statistics_notif;
@@ -774,7 +898,6 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_reply_statistics;
priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
- iwl_setup_spectrum_handlers(priv);
iwl_setup_rx_scan_handlers(priv);
/* status change handler */
@@ -1634,7 +1757,7 @@ static const char *desc_lookup_text[] = {
"DEBUG_1",
"DEBUG_2",
"DEBUG_3",
- "UNKNOWN"
+ "ADVANCED SYSASSERT"
};
static const char *desc_lookup(int i)
@@ -1705,8 +1828,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
* iwl_print_event_log - Dump error event log to syslog
*
*/
-static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
- u32 num_events, u32 mode)
+static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
+ u32 num_events, u32 mode,
+ int pos, char **buf, size_t bufsz)
{
u32 i;
u32 base; /* SRAM byte address of event log header */
@@ -1716,7 +1840,7 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
unsigned long reg_flags;
if (num_events == 0)
- return;
+ return pos;
if (priv->ucode_type == UCODE_INIT)
base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
else
@@ -1744,27 +1868,44 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
if (mode == 0) {
/* data, ev */
- trace_iwlwifi_dev_ucode_event(priv, 0, time, ev);
- IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev);
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "EVT_LOG:0x%08x:%04u\n",
+ time, ev);
+ } else {
+ trace_iwlwifi_dev_ucode_event(priv, 0,
+ time, ev);
+ IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
+ time, ev);
+ }
} else {
data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "EVT_LOGT:%010u:0x%08x:%04u\n",
+ time, data, ev);
+ } else {
+ IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
time, data, ev);
- trace_iwlwifi_dev_ucode_event(priv, time, data, ev);
+ trace_iwlwifi_dev_ucode_event(priv, time,
+ data, ev);
+ }
}
}
/* Allow device to power down */
iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return pos;
}
/**
* iwl_print_last_event_logs - Dump the newest # of event log to syslog
*/
-static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
- u32 num_wraps, u32 next_entry,
- u32 size, u32 mode)
+static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
+ u32 num_wraps, u32 next_entry,
+ u32 size, u32 mode,
+ int pos, char **buf, size_t bufsz)
{
/*
* display the newest DEFAULT_LOG_ENTRIES entries
@@ -1772,21 +1913,26 @@ static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
*/
if (num_wraps) {
if (next_entry < size) {
- iwl_print_event_log(priv,
- capacity - (size - next_entry),
- size - next_entry, mode);
- iwl_print_event_log(priv, 0,
- next_entry, mode);
+ pos = iwl_print_event_log(priv,
+ capacity - (size - next_entry),
+ size - next_entry, mode,
+ pos, buf, bufsz);
+ pos = iwl_print_event_log(priv, 0,
+ next_entry, mode,
+ pos, buf, bufsz);
} else
- iwl_print_event_log(priv, next_entry - size,
- size, mode);
+ pos = iwl_print_event_log(priv, next_entry - size,
+ size, mode, pos, buf, bufsz);
} else {
- if (next_entry < size)
- iwl_print_event_log(priv, 0, next_entry, mode);
- else
- iwl_print_event_log(priv, next_entry - size,
- size, mode);
+ if (next_entry < size) {
+ pos = iwl_print_event_log(priv, 0, next_entry,
+ mode, pos, buf, bufsz);
+ } else {
+ pos = iwl_print_event_log(priv, next_entry - size,
+ size, mode, pos, buf, bufsz);
+ }
}
+ return pos;
}
/* For sanity check only. Actual size is determined by uCode, typ. 512 */
@@ -1794,7 +1940,8 @@ static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
-void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
+int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+ char **buf, bool display)
{
u32 base; /* SRAM byte address of event log header */
u32 capacity; /* event log capacity in # entries */
@@ -1802,6 +1949,8 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
u32 num_wraps; /* # times uCode wrapped to top of log */
u32 next_entry; /* index of next entry to be written by uCode */
u32 size; /* # entries that we'll print */
+ int pos = 0;
+ size_t bufsz = 0;
if (priv->ucode_type == UCODE_INIT)
base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
@@ -1812,7 +1961,7 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
IWL_ERR(priv,
"Invalid event log pointer 0x%08X for %s uCode\n",
base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
- return;
+ return -EINVAL;
}
/* event log header */
@@ -1838,7 +1987,7 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
/* bail out if nothing in log */
if (size == 0) {
IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
- return;
+ return pos;
}
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -1853,6 +2002,15 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
size);
#ifdef CONFIG_IWLWIFI_DEBUG
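+ /*
+ * Sizing note: each formatted "EVT_LOG:"/"EVT_LOGT:" line is well
+ * under 48 characters, so 48 bytes per entry is a safe upper bound
+ * for the buffer allocated below.
+ */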
+ if (display) {
+ if (full_log)
+ bufsz = capacity * 48;
+ else
+ bufsz = size * 48;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ }
if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
/*
* if uCode has wrapped back to top of log,
@@ -1860,17 +2018,22 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
* i.e the next one that uCode would fill.
*/
if (num_wraps)
- iwl_print_event_log(priv, next_entry,
- capacity - next_entry, mode);
+ pos = iwl_print_event_log(priv, next_entry,
+ capacity - next_entry, mode,
+ pos, buf, bufsz);
/* (then/else) start at top of log */
- iwl_print_event_log(priv, 0, next_entry, mode);
+ pos = iwl_print_event_log(priv, 0,
+ next_entry, mode, pos, buf, bufsz);
} else
- iwl_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode);
+ pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
#else
- iwl_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode);
+ pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
#endif
+ return pos;
}
/**
@@ -2276,18 +2439,6 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
return;
}
-static void iwl_bg_up(struct work_struct *data)
-{
- struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
- __iwl_up(priv);
- mutex_unlock(&priv->mutex);
-}
-
static void iwl_bg_restart(struct work_struct *data)
{
struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
@@ -2304,7 +2455,13 @@ static void iwl_bg_restart(struct work_struct *data)
ieee80211_restart_hw(priv->hw);
} else {
iwl_down(priv);
- queue_work(priv->workqueue, &priv->up);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ mutex_lock(&priv->mutex);
+ __iwl_up(priv);
+ mutex_unlock(&priv->mutex);
}
}
@@ -2440,7 +2597,7 @@ void iwl_post_associate(struct iwl_priv *priv)
* Not a mac80211 entry point function, but it fits in with all the
* other mac80211 functions grouped here.
*/
-static int iwl_setup_mac(struct iwl_priv *priv)
+static int iwl_mac_setup_register(struct iwl_priv *priv)
{
int ret;
struct ieee80211_hw *hw = priv->hw;
@@ -2456,6 +2613,10 @@ static int iwl_setup_mac(struct iwl_priv *priv)
hw->flags |= IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+ if (priv->cfg->sku & IWL_SKU_N)
+ hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+
hw->sta_data_size = sizeof(struct iwl_station_priv);
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
@@ -2470,7 +2631,7 @@ static int iwl_setup_mac(struct iwl_priv *priv)
*/
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX + 1;
/* we create the 802.11 header and a zero-length SSID element */
hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
@@ -2668,14 +2829,18 @@ void iwl_config_ap(struct iwl_priv *priv)
}
static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_key_conf *keyconf, const u8 *addr,
- u32 iv32, u16 *phase1key)
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key)
{
struct iwl_priv *priv = hw->priv;
IWL_DEBUG_MAC80211(priv, "enter\n");
- iwl_update_tkip_key(priv, keyconf, addr, iv32, phase1key);
+ iwl_update_tkip_key(priv, keyconf,
+ sta ? sta->addr : iwl_bcast_addr,
+ iv32, phase1key);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
@@ -2784,6 +2949,9 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
return 0;
else
return ret;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /* do nothing */
+ return -EOPNOTSUPP;
default:
IWL_DEBUG_HT(priv, "unknown\n");
return -EINVAL;
@@ -2833,6 +3001,8 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
break;
case STA_NOTIFY_AWAKE:
WARN_ON(!sta_priv->client);
+ if (!sta_priv->asleep)
+ break;
sta_priv->asleep = false;
sta_id = iwl_find_station(priv, sta->addr);
if (sta_id != IWL_INVALID_STATION)
@@ -3109,7 +3279,6 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
init_waitqueue_head(&priv->wait_command_queue);
- INIT_WORK(&priv->up, iwl_bg_up);
INIT_WORK(&priv->restart, iwl_bg_restart);
INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
@@ -3126,6 +3295,10 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
priv->statistics_periodic.data = (unsigned long)priv;
priv->statistics_periodic.function = iwl_bg_statistics_periodic;
+ init_timer(&priv->ucode_trace);
+ priv->ucode_trace.data = (unsigned long)priv;
+ priv->ucode_trace.function = iwl_bg_ucode_trace;
+
if (!priv->cfg->use_isr_legacy)
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
iwl_irq_tasklet, (unsigned long)priv);
@@ -3144,6 +3317,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
cancel_delayed_work(&priv->alive_start);
cancel_work_sync(&priv->beacon_update);
del_timer_sync(&priv->statistics_periodic);
+ del_timer_sync(&priv->ucode_trace);
}
static void iwl_init_hw_rates(struct iwl_priv *priv,
@@ -3188,6 +3362,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->band = IEEE80211_BAND_2GHZ;
priv->iw_mode = NL80211_IFTYPE_STATION;
+ priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
+ priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
/* Choose which receivers/antennas to use */
if (priv->cfg->ops->hcmd->set_rxon_chain)
@@ -3264,7 +3440,6 @@ static struct ieee80211_ops iwl_hw_ops = {
.set_key = iwl_mac_set_key,
.update_tkip_key = iwl_mac_update_tkip_key,
.get_stats = iwl_mac_get_stats,
- .get_tx_stats = iwl_mac_get_tx_stats,
.conf_tx = iwl_mac_conf_tx,
.reset_tsf = iwl_mac_reset_tsf,
.bss_info_changed = iwl_bss_info_changed,
@@ -3439,9 +3614,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_setup_deferred_work(priv);
iwl_setup_rx_handlers(priv);
- /**********************************
- * 8. Setup and register mac80211
- **********************************/
+ /*********************************************
+ * 8. Enable interrupts and read RFKILL state
+ *********************************************/
/* enable interrupts if needed: hw bug w/a */
pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
@@ -3452,14 +3627,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_enable_interrupts(priv);
- err = iwl_setup_mac(priv);
- if (err)
- goto out_remove_sysfs;
-
- err = iwl_dbgfs_register(priv, DRV_NAME);
- if (err)
- IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
-
/* If platform's RF_KILL switch is NOT set to KILL */
if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
clear_bit(STATUS_RF_KILL_HW, &priv->status);
@@ -3471,6 +3638,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_power_initialize(priv);
iwl_tt_initialize(priv);
+
+ /**************************************************
+ * 9. Setup and register with mac80211 and debugfs
+ **************************************************/
+ err = iwl_mac_setup_register(priv);
+ if (err)
+ goto out_remove_sysfs;
+
+ err = iwl_dbgfs_register(priv, DRV_NAME);
+ if (err)
+ IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
+
return 0;
out_remove_sysfs:
@@ -3589,7 +3768,7 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
*****************************************************************************/
/* Hardware specific file defines the PCI IDs table for that hardware module */
-static struct pci_device_id iwl_hw_card_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
#ifdef CONFIG_IWL4965
{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
{IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index 95a57b36a7ea..845831ac053e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -414,7 +414,6 @@ static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
static int iwl_sensitivity_write(struct iwl_priv *priv)
{
- int ret = 0;
struct iwl_sensitivity_cmd cmd ;
struct iwl_sensitivity_data *data = NULL;
struct iwl_host_cmd cmd_out = {
@@ -477,11 +476,7 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
sizeof(u16)*HD_TABLE_SIZE);
- ret = iwl_send_cmd(priv, &cmd_out);
- if (ret)
- IWL_ERR(priv, "SENSITIVITY_CMD failed\n");
-
- return ret;
+ return iwl_send_cmd(priv, &cmd_out);
}
void iwl_init_sensitivity(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.h b/drivers/net/wireless/iwlwifi/iwl-calib.h
index b6cef989a796..2b7b1df83ba0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index e91507531923..c2f31eb26bef 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -120,7 +120,6 @@ enum {
CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
/* 802.11h related */
- RADAR_NOTIFICATION = 0x70, /* not used */
REPLY_QUIET_CMD = 0x71, /* not used */
REPLY_CHANNEL_SWITCH = 0x72,
CHANNEL_SWITCH_NOTIFICATION = 0x73,
@@ -2248,10 +2247,22 @@ struct iwl_link_quality_cmd {
__le32 reserved2;
} __attribute__ ((packed));
+/*
+ * BT configuration enable flags:
+ * bit 0 - 1: BT channel announcement enabled
+ * 0: disable
+ * bit 1 - 1: priority of BT device enabled
+ * 0: disable
+ * bit 2 - 1: BT 2 wire support enabled
+ * 0: disable
+ */
+#define BT_COEX_DISABLE (0x0)
+#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0)
+#define BT_ENABLE_PRIORITY BIT(1)
+#define BT_ENABLE_2_WIRE BIT(2)
+
#define BT_COEX_DISABLE (0x0)
-#define BT_COEX_MODE_2W (0x1)
-#define BT_COEX_MODE_3W (0x2)
-#define BT_COEX_MODE_4W (0x3)
+#define BT_COEX_ENABLE (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)
#define BT_LEAD_TIME_MIN (0x0)
#define BT_LEAD_TIME_DEF (0x1E)
@@ -2510,7 +2521,7 @@ struct iwl_card_state_notif {
#define HW_CARD_DISABLED 0x01
#define SW_CARD_DISABLED 0x02
-#define RF_CARD_DISABLED 0x04
+#define CT_CARD_DISABLED 0x04
#define RXON_CARD_DISABLED 0x10
struct iwl_ct_kill_config {
@@ -2984,7 +2995,7 @@ struct statistics_rx_ht_phy {
__le32 agg_crc32_good;
__le32 agg_mpdu_cnt;
__le32 agg_cnt;
- __le32 reserved2;
+ __le32 unsupport_mcs;
} __attribute__ ((packed));
#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
@@ -3087,8 +3098,8 @@ struct statistics_div {
} __attribute__ ((packed));
struct statistics_general {
- __le32 temperature;
- __le32 temperature_m;
+ __le32 temperature; /* radio temperature */
+ __le32 temperature_m; /* for 5000 and up, this is radio voltage */
struct statistics_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
@@ -3096,7 +3107,12 @@ struct statistics_general {
__le32 ttl_timestamp;
struct statistics_div div;
__le32 rx_enable_counter;
- __le32 reserved1;
+ /*
+ * num_of_sos_states:
+ * counts the number of times we had to re-tune the radio
+ * in order to get out of a bad PHY status
+ */
+ __le32 num_of_sos_states;
__le32 reserved2;
__le32 reserved3;
} __attribute__ ((packed));
@@ -3161,13 +3177,30 @@ struct iwl_notif_statistics {
/*
* MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
+ *
+ * uCode sends a MISSED_BEACONS_NOTIFICATION to the driver whenever it
+ * detects missed beacons, regardless of how many were missed.  The
+ * notification carries the full beacon accounting: the total number of
+ * missed beacons, the number of consecutive missed beacons, the number
+ * of beacons received and the number of beacons expected.
+ *
+ * If uCode detects consecutive_missed_beacons > 5, it resets the radio
+ * to bring the radio/PHY back to a working state; this has no relation
+ * to when the driver performs sensitivity calibration.
+ *
+ * The driver should set its own missed_beacon_threshold to decide when
+ * to perform sensitivity calibration based on the number of consecutive
+ * missed beacons, in order to improve overall performance, especially
+ * in noisy environments.
+ *
*/
-/* if ucode missed CONSECUTIVE_MISSED_BCONS_TH beacons in a row,
- * then this notification will be sent. */
-#define CONSECUTIVE_MISSED_BCONS_TH 20
+
+#define IWL_MISSED_BEACON_THRESHOLD_MIN (1)
+#define IWL_MISSED_BEACON_THRESHOLD_DEF (5)
+#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF
struct iwl_missed_beacon_notif {
- __le32 consequtive_missed_beacons;
+ __le32 consecutive_missed_beacons;
__le32 total_missed_becons;
__le32 num_expected_beacons;
__le32 num_recvd_beacons;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index d10bea64fce3..d390eef2efe5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -47,6 +47,26 @@ MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
+/*
+ * When bt_coex_active is true, uCode will do kill/defer every time the
+ * priority line is asserted (BT is sending signals on the priority line
+ * in the PCIx).
+ * When bt_coex_active is false, uCode ignores BT activity and performs
+ * normal operation.
+ *
+ * Users might experience transmit issues on some platforms due to this
+ * WiFi/BT co-existence problem.  Typical symptoms are being able to scan
+ * and find all available APs but not being able to associate with any AP.
+ * On those platforms, WiFi communication can be restored by setting the
+ * "bt_coex_active" module parameter to "false".
+ *
+ * default: bt_coex_active = true (BT_COEX_ENABLE)
+ */
+static bool bt_coex_active = true;
+module_param(bt_coex_active, bool, S_IRUGO);
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist\n");
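+/*
+ * Illustrative effect of the parameter: with bt_coex_active=0 on the
+ * module command line, iwl_send_bt_config() below sends BT_COEX_DISABLE
+ * in the REPLY_BT_CONFIG command instead of BT_COEX_ENABLE.
+ */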
+
static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
0, COEX_UNASSOC_IDLE_FLAGS},
@@ -257,8 +277,8 @@ int iwl_hw_nic_init(struct iwl_priv *priv)
spin_lock_irqsave(&priv->lock, flags);
priv->cfg->ops->lib->apm_ops.init(priv);
- /* Set interrupt coalescing timer to 512 usecs */
- iwl_write8(priv, CSR_INT_COALESCING, 512 / 32);
+ /* Set interrupt coalescing calibration timer to default (512 usecs) */
+ iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -450,8 +470,6 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
if (priv->cfg->ht_greenfield_support)
ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
- ht_info->cap |= (IEEE80211_HT_CAP_SM_PS &
- (priv->cfg->sm_ps_mode << 2));
max_bit_rate = MAX_BIT_RATE_20_MHZ;
if (priv->hw_params.ht40_channel & BIT(band)) {
ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@ -636,7 +654,7 @@ EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);
static bool is_single_rx_stream(struct iwl_priv *priv)
{
- return !priv->current_ht_config.is_ht ||
+ return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
priv->current_ht_config.single_chain_sufficient;
}
@@ -1003,28 +1021,18 @@ static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
*/
static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
{
- int idle_cnt = active_cnt;
- bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
-
- /* # Rx chains when idling and maybe trying to save power */
- switch (priv->cfg->sm_ps_mode) {
- case WLAN_HT_CAP_SM_PS_STATIC:
- idle_cnt = (is_cam) ? active_cnt : IWL_NUM_IDLE_CHAINS_SINGLE;
- break;
- case WLAN_HT_CAP_SM_PS_DYNAMIC:
- idle_cnt = (is_cam) ? IWL_NUM_IDLE_CHAINS_DUAL :
- IWL_NUM_IDLE_CHAINS_SINGLE;
- break;
- case WLAN_HT_CAP_SM_PS_DISABLED:
- break;
- case WLAN_HT_CAP_SM_PS_INVALID:
+ /* # Rx chains when idling, depending on SMPS mode */
+ switch (priv->current_ht_config.smps) {
+ case IEEE80211_SMPS_STATIC:
+ case IEEE80211_SMPS_DYNAMIC:
+ return IWL_NUM_IDLE_CHAINS_SINGLE;
+ case IEEE80211_SMPS_OFF:
+ return active_cnt;
default:
- IWL_ERR(priv, "invalid sm_ps mode %u\n",
- priv->cfg->sm_ps_mode);
- WARN_ON(1);
- break;
+ WARN(1, "invalid SMPS mode %d",
+ priv->current_ht_config.smps);
+ return active_cnt;
}
- return idle_cnt;
}
/* up to 4 chains */
@@ -1363,7 +1371,11 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
priv->cfg->ops->lib->dump_nic_error_log(priv);
- priv->cfg->ops->lib->dump_nic_event_log(priv, false);
+ if (priv->cfg->ops->lib->dump_csr)
+ priv->cfg->ops->lib->dump_csr(priv);
+ if (priv->cfg->ops->lib->dump_fh)
+ priv->cfg->ops->lib->dump_fh(priv, NULL, false);
+ priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
iwl_print_rx_config_cmd(priv);
@@ -1813,6 +1825,16 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
if (val == 0xffffffff)
val = 0;
+ /*
+ * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
+ * (bit 15 before shifting it to 31) to clear when using interrupt
+ * coalescing. fortunately, bits 18 and 19 stay set when this happens
+ * so we use them to decide on the real state of the Rx bit.
+ * In other words, bit 15 is set if bit 18 or bit 19 are set.
+ */
+ if (val & 0xC0000)
+ val |= 0x8000;
+
inta = (0xff & val) | ((0xff00 & val) << 16);
IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
inta, inta_mask, val);
@@ -1975,13 +1997,20 @@ EXPORT_SYMBOL(iwl_isr_legacy);
int iwl_send_bt_config(struct iwl_priv *priv)
{
struct iwl_bt_cmd bt_cmd = {
- .flags = BT_COEX_MODE_4W,
.lead_time = BT_LEAD_TIME_DEF,
.max_kill = BT_MAX_KILL_DEF,
.kill_ack_mask = 0,
.kill_cts_mask = 0,
};
+ if (!bt_coex_active)
+ bt_cmd.flags = BT_COEX_DISABLE;
+ else
+ bt_cmd.flags = BT_COEX_ENABLE;
+
+ IWL_DEBUG_INFO(priv, "BT coex %s\n",
+ (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
+
return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
sizeof(struct iwl_bt_cmd), &bt_cmd);
}
@@ -2599,44 +2628,43 @@ int iwl_set_mode(struct iwl_priv *priv, int mode)
EXPORT_SYMBOL(iwl_set_mode);
int iwl_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
- unsigned long flags;
+ int err = 0;
- IWL_DEBUG_MAC80211(priv, "enter: type %d\n", conf->type);
+ IWL_DEBUG_MAC80211(priv, "enter: type %d\n", vif->type);
+
+ mutex_lock(&priv->mutex);
if (priv->vif) {
IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto out;
}
- spin_lock_irqsave(&priv->lock, flags);
- priv->vif = conf->vif;
- priv->iw_mode = conf->type;
-
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->vif = vif;
+ priv->iw_mode = vif->type;
- mutex_lock(&priv->mutex);
-
- if (conf->mac_addr) {
- IWL_DEBUG_MAC80211(priv, "Set %pM\n", conf->mac_addr);
- memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
+ if (vif->addr) {
+ IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
+ memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
}
- if (iwl_set_mode(priv, conf->type) == -EAGAIN)
+ if (iwl_set_mode(priv, vif->type) == -EAGAIN)
/* we are not ready, will run again when ready */
set_bit(STATUS_MODE_PENDING, &priv->status);
+ out:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
+ return err;
}
EXPORT_SYMBOL(iwl_mac_add_interface);
void iwl_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
@@ -2649,7 +2677,7 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
iwlcore_commit_rxon(priv);
}
- if (priv->vif == conf->vif) {
+ if (priv->vif == vif) {
priv->vif = NULL;
memset(priv->bssid, 0, ETH_ALEN);
}
@@ -2689,6 +2717,21 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
}
+ if (changed & (IEEE80211_CONF_CHANGE_SMPS |
+ IEEE80211_CONF_CHANGE_CHANNEL)) {
+ /* mac80211 uses static for non-HT which is what we want */
+ priv->current_ht_config.smps = conf->smps_mode;
+
+ /*
+ * Recalculate chain counts.
+ *
+ * If monitor mode is enabled then mac80211 will
+ * set up the SM PS mode to OFF if an HT channel is
+ * configured.
+ */
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv);
+ }
/* during scanning mac80211 will delay channel setting until
* scan finish with changed = 0
@@ -2786,10 +2829,6 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
iwl_set_tx_power(priv, conf->power_level, false);
}
- /* call to ensure that 4965 rx_chain is set properly in monitor mode */
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv);
-
if (!iwl_is_ready(priv)) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
goto out;
@@ -2812,42 +2851,6 @@ out:
}
EXPORT_SYMBOL(iwl_mac_config);
-int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct iwl_priv *priv = hw->priv;
- int i, avail;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- unsigned long flags;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!iwl_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return -EIO;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- for (i = 0; i < AC_NUM; i++) {
- txq = &priv->txq[i];
- q = &txq->q;
- avail = iwl_queue_space(q);
-
- stats[i].len = q->n_window - avail;
- stats[i].limit = q->n_window - q->high_mark;
- stats[i].count = q->n_window;
-
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_mac_get_tx_stats);
-
void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = hw->priv;
@@ -3197,6 +3200,164 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
EXPORT_SYMBOL(iwl_update_stats);
#endif
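+/*
+ * Note: IWL_CMD() below is assumed to be the driver's register-name
+ * stringify helper (roughly "case x: return #x;"); it is defined
+ * elsewhere in the driver headers.
+ */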
+static const char *get_csr_string(int cmd)
+{
+ switch (cmd) {
+ IWL_CMD(CSR_HW_IF_CONFIG_REG);
+ IWL_CMD(CSR_INT_COALESCING);
+ IWL_CMD(CSR_INT);
+ IWL_CMD(CSR_INT_MASK);
+ IWL_CMD(CSR_FH_INT_STATUS);
+ IWL_CMD(CSR_GPIO_IN);
+ IWL_CMD(CSR_RESET);
+ IWL_CMD(CSR_GP_CNTRL);
+ IWL_CMD(CSR_HW_REV);
+ IWL_CMD(CSR_EEPROM_REG);
+ IWL_CMD(CSR_EEPROM_GP);
+ IWL_CMD(CSR_OTP_GP_REG);
+ IWL_CMD(CSR_GIO_REG);
+ IWL_CMD(CSR_GP_UCODE_REG);
+ IWL_CMD(CSR_GP_DRIVER_REG);
+ IWL_CMD(CSR_UCODE_DRV_GP1);
+ IWL_CMD(CSR_UCODE_DRV_GP2);
+ IWL_CMD(CSR_LED_REG);
+ IWL_CMD(CSR_DRAM_INT_TBL_REG);
+ IWL_CMD(CSR_GIO_CHICKEN_BITS);
+ IWL_CMD(CSR_ANA_PLL_CFG);
+ IWL_CMD(CSR_HW_REV_WA_REG);
+ IWL_CMD(CSR_DBG_HPET_MEM_REG);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+void iwl_dump_csr(struct iwl_priv *priv)
+{
+ int i;
+ u32 csr_tbl[] = {
+ CSR_HW_IF_CONFIG_REG,
+ CSR_INT_COALESCING,
+ CSR_INT,
+ CSR_INT_MASK,
+ CSR_FH_INT_STATUS,
+ CSR_GPIO_IN,
+ CSR_RESET,
+ CSR_GP_CNTRL,
+ CSR_HW_REV,
+ CSR_EEPROM_REG,
+ CSR_EEPROM_GP,
+ CSR_OTP_GP_REG,
+ CSR_GIO_REG,
+ CSR_GP_UCODE_REG,
+ CSR_GP_DRIVER_REG,
+ CSR_UCODE_DRV_GP1,
+ CSR_UCODE_DRV_GP2,
+ CSR_LED_REG,
+ CSR_DRAM_INT_TBL_REG,
+ CSR_GIO_CHICKEN_BITS,
+ CSR_ANA_PLL_CFG,
+ CSR_HW_REV_WA_REG,
+ CSR_DBG_HPET_MEM_REG
+ };
+ IWL_ERR(priv, "CSR values:\n");
+ IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
+ "CSR_INT_PERIODIC_REG)\n");
+ for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
+ IWL_ERR(priv, " %25s: 0X%08x\n",
+ get_csr_string(csr_tbl[i]),
+ iwl_read32(priv, csr_tbl[i]));
+ }
+}
+EXPORT_SYMBOL(iwl_dump_csr);
+
+static const char *get_fh_string(int cmd)
+{
+ switch (cmd) {
+ IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_WPTR);
+ IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
+ IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+ IWL_CMD(FH_TSSR_TX_STATUS_REG);
+ IWL_CMD(FH_TSSR_TX_ERROR_REG);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
+{
+ int i;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ int pos = 0;
+ size_t bufsz = 0;
+#endif
+ u32 fh_tbl[] = {
+ FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ FH_RSCSR_CHNL0_WPTR,
+ FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_MEM_RSSR_SHARED_CTRL_REG,
+ FH_MEM_RSSR_RX_STATUS_REG,
+ FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+ FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_ERROR_REG
+ };
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (display) {
+ bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(priv, fh_tbl[i]));
+ }
+ return pos;
+ }
+#endif
+ IWL_ERR(priv, "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ IWL_ERR(priv, " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(priv, fh_tbl[i]));
+ }
+ return 0;
+}
+EXPORT_SYMBOL(iwl_dump_fh);
+
+void iwl_force_rf_reset(struct iwl_priv *priv)
+{
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ if (!iwl_is_associated(priv)) {
+ IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
+ return;
+ }
+ /*
+ * There is no easy and better way to force-reset the radio; the only
+ * known method is switching channels, which forces the radio to reset
+ * and re-tune.  Use the internal short scan (single channel) operation
+ * to achieve this.
+ * The driver should reset the radio when the consecutive-missed-beacon
+ * threshold is reached, or when any other uCode error condition is
+ * detected.
+ */
+ IWL_DEBUG_INFO(priv, "perform radio reset.\n");
+ iwl_internal_short_hw_scan(priv);
+ return;
+}
+EXPORT_SYMBOL(iwl_force_rf_reset);
+
#ifdef CONFIG_PM
int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 27ca859e7453..8f0c564e68b0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,8 +63,6 @@
#ifndef __iwl_core_h__
#define __iwl_core_h__
-#include <generated/utsrelease.h>
-
/************************
* forward declarations *
************************/
@@ -72,8 +70,8 @@ struct iwl_host_cmd;
struct iwl_cmd;
-#define IWLWIFI_VERSION UTS_RELEASE "-k"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2009 Intel Corporation"
+#define IWLWIFI_VERSION "in-tree:"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
#define IWL_PCI_DEVICE(dev, subdev, cfg) \
@@ -169,8 +167,11 @@ struct iwl_lib_ops {
int (*is_valid_rtc_data_addr)(u32 addr);
/* 1st ucode load */
int (*load_ucode)(struct iwl_priv *priv);
- void (*dump_nic_event_log)(struct iwl_priv *priv, bool full_log);
+ int (*dump_nic_event_log)(struct iwl_priv *priv,
+ bool full_log, char **buf, bool display);
void (*dump_nic_error_log)(struct iwl_priv *priv);
+ void (*dump_csr)(struct iwl_priv *priv);
+ int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
int (*set_channel_switch)(struct iwl_priv *priv, u16 channel);
/* power management */
struct iwl_apm_ops apm_ops;
@@ -187,6 +188,8 @@ struct iwl_lib_ops {
/* temperature */
struct iwl_temp_ops temp_ops;
+ /* station management */
+ void (*add_bcast_station)(struct iwl_priv *priv);
};
struct iwl_led_ops {
@@ -230,8 +233,9 @@ struct iwl_mod_params {
* @chain_noise_num_beacons: number of beacons used to compute chain noise
* @adv_thermal_throttle: support advance thermal throttle
* @support_ct_kill_exit: support ct kill exit condition
- * @sm_ps_mode: spatial multiplexing power save mode
* @support_wimax_coexist: support wimax/wifi co-exist
+ * @plcp_delta_threshold: plcp error rate threshold used to trigger
+ * radio tuning when the received plcp error rate is too high
*
* We enable the driver to be backward compatible wrt API version. The
* driver specifies which APIs it supports (with @ucode_api_max being the
@@ -287,8 +291,9 @@ struct iwl_cfg {
const bool supports_idle;
bool adv_thermal_throttle;
bool support_ct_kill_exit;
- u8 sm_ps_mode;
const bool support_wimax_coexist;
+ u8 plcp_delta_threshold;
+ s32 chain_noise_scale;
};
/***************************
@@ -332,13 +337,11 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb);
int iwl_commit_rxon(struct iwl_priv *priv);
int iwl_set_mode(struct iwl_priv *priv, int mode);
int iwl_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
void iwl_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
int iwl_mac_config(struct ieee80211_hw *hw, u32 changed);
void iwl_config_ap(struct iwl_priv *priv);
-int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats);
void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
int iwl_alloc_txq_mem(struct iwl_priv *priv);
void iwl_free_txq_mem(struct iwl_priv *priv);
@@ -425,6 +428,8 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
/* Handlers */
void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
+void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
void iwl_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_reply_statistics(struct iwl_priv *priv,
@@ -495,6 +500,8 @@ void iwl_init_scan_params(struct iwl_priv *priv);
int iwl_scan_cancel(struct iwl_priv *priv);
int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req);
+int iwl_internal_short_hw_scan(struct iwl_priv *priv);
+void iwl_force_rf_reset(struct iwl_priv *priv);
u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
const u8 *ie, int ie_len, int left);
void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
@@ -525,14 +532,6 @@ int iwl_send_calib_results(struct iwl_priv *priv);
int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
void iwl_calib_free_results(struct iwl_priv *priv);
-/*******************************************************************************
- * Spectrum Measureemtns in iwl-spectrum.c
- ******************************************************************************/
-#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
-void iwl_setup_spectrum_handlers(struct iwl_priv *priv);
-#else
-static inline void iwl_setup_spectrum_handlers(struct iwl_priv *priv) {}
-#endif
/*****************************************************
* S e n d i n g H o s t C o m m a n d s *
*****************************************************/
@@ -581,7 +580,10 @@ int iwl_pci_resume(struct pci_dev *pdev);
* Error Handling Debugging
******************************************************/
void iwl_dump_nic_error_log(struct iwl_priv *priv);
-void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log);
+int iwl_dump_nic_event_log(struct iwl_priv *priv,
+ bool full_log, char **buf, bool display);
+void iwl_dump_csr(struct iwl_priv *priv);
+int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display);
#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv);
#else
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 1ec8cb4d5eae..1e00720bf8b1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index d61293ab67c9..1c7b53d511c7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -67,57 +67,6 @@ do { \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-struct iwl_debugfs {
- const char *name;
- struct dentry *dir_drv;
- struct dentry *dir_data;
- struct dentry *dir_debug;
- struct dentry *dir_rf;
- struct dir_data_files {
- struct dentry *file_sram;
- struct dentry *file_nvm;
- struct dentry *file_stations;
- struct dentry *file_log_event;
- struct dentry *file_channels;
- struct dentry *file_status;
- struct dentry *file_interrupt;
- struct dentry *file_qos;
- struct dentry *file_thermal_throttling;
- struct dentry *file_led;
- struct dentry *file_disable_ht40;
- struct dentry *file_sleep_level_override;
- struct dentry *file_current_sleep_command;
- } dbgfs_data_files;
- struct dir_rf_files {
- struct dentry *file_disable_sensitivity;
- struct dentry *file_disable_chain_noise;
- struct dentry *file_disable_tx_power;
- } dbgfs_rf_files;
- struct dir_debug_files {
- struct dentry *file_rx_statistics;
- struct dentry *file_tx_statistics;
- struct dentry *file_traffic_log;
- struct dentry *file_rx_queue;
- struct dentry *file_tx_queue;
- struct dentry *file_ucode_rx_stats;
- struct dentry *file_ucode_tx_stats;
- struct dentry *file_ucode_general_stats;
- struct dentry *file_sensitivity;
- struct dentry *file_chain_noise;
- struct dentry *file_tx_power;
- struct dentry *file_power_save_status;
- struct dentry *file_clear_ucode_statistics;
- struct dentry *file_clear_traffic_statistics;
- } dbgfs_debug_files;
- u32 sram_offset;
- u32 sram_len;
-};
-
-int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
-void iwl_dbgfs_unregister(struct iwl_priv *priv);
-#endif
-
#else
#define IWL_DEBUG(__priv, level, fmt, args...)
#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
@@ -126,9 +75,10 @@ static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
{}
#endif /* CONFIG_IWLWIFI_DEBUG */
-
-
-#ifndef CONFIG_IWLWIFI_DEBUGFS
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
+void iwl_dbgfs_unregister(struct iwl_priv *priv);
+#else
static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
{
return 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 21e0f6699daf..d134301b553c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -41,43 +41,28 @@
#include "iwl-calib.h"
/* create and remove of files */
-#define DEBUGFS_ADD_DIR(name, parent) do { \
- dbgfs->dir_##name = debugfs_create_dir(#name, parent); \
- if (!(dbgfs->dir_##name)) \
- goto err; \
+#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, priv, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
} while (0)
-#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
- dbgfs->dbgfs_##parent##_files.file_##name = \
- debugfs_create_file(#name, mode, \
- dbgfs->dir_##parent, priv, \
- &iwl_dbgfs_##name##_ops); \
- if (!(dbgfs->dbgfs_##parent##_files.file_##name)) \
- goto err; \
+#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
} while (0)
-#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
- dbgfs->dbgfs_##parent##_files.file_##name = \
- debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
- dbgfs->dir_##parent, ptr); \
- if (IS_ERR(dbgfs->dbgfs_##parent##_files.file_##name) \
- || !dbgfs->dbgfs_##parent##_files.file_##name) \
- goto err; \
+#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
} while (0)
-#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
- dbgfs->dbgfs_##parent##_files.file_##name = \
- debugfs_create_x32(#name, S_IRUSR, dbgfs->dir_##parent, ptr); \
- if (IS_ERR(dbgfs->dbgfs_##parent##_files.file_##name) \
- || !dbgfs->dbgfs_##parent##_files.file_##name) \
- goto err; \
-} while (0)
-
-#define DEBUGFS_REMOVE(name) do { \
- debugfs_remove(name); \
- name = NULL; \
-} while (0);
-
/* file operation */
#define DEBUGFS_READ_FUNC(name) \
static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
@@ -125,7 +110,7 @@ static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char *buf;
int pos = 0;
@@ -184,7 +169,7 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char *buf;
int pos = 0;
int cnt;
@@ -232,28 +217,28 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
ssize_t ret;
int i;
int pos = 0;
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
size_t bufsz;
/* default is to dump the entire data segment */
- if (!priv->dbgfs->sram_offset && !priv->dbgfs->sram_len) {
- priv->dbgfs->sram_offset = 0x800000;
+ if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
+ priv->dbgfs_sram_offset = 0x800000;
if (priv->ucode_type == UCODE_INIT)
- priv->dbgfs->sram_len = priv->ucode_init_data.len;
+ priv->dbgfs_sram_len = priv->ucode_init_data.len;
else
- priv->dbgfs->sram_len = priv->ucode_data.len;
+ priv->dbgfs_sram_len = priv->ucode_data.len;
}
- bufsz = 30 + priv->dbgfs->sram_len * sizeof(char) * 10;
+ bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
buf = kmalloc(bufsz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
- priv->dbgfs->sram_len);
+ priv->dbgfs_sram_len);
pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
- priv->dbgfs->sram_offset);
- for (i = priv->dbgfs->sram_len; i > 0; i -= 4) {
- val = iwl_read_targ_mem(priv, priv->dbgfs->sram_offset + \
- priv->dbgfs->sram_len - i);
+ priv->dbgfs_sram_offset);
+ for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
+ val = iwl_read_targ_mem(priv, priv->dbgfs_sram_offset + \
+ priv->dbgfs_sram_len - i);
if (i < 4) {
switch (i) {
case 1:
@@ -293,11 +278,11 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
return -EFAULT;
if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
- priv->dbgfs->sram_offset = offset;
- priv->dbgfs->sram_len = len;
+ priv->dbgfs_sram_offset = offset;
+ priv->dbgfs_sram_len = len;
} else {
- priv->dbgfs->sram_offset = 0;
- priv->dbgfs->sram_len = 0;
+ priv->dbgfs_sram_offset = 0;
+ priv->dbgfs_sram_len = 0;
}
return count;
@@ -306,7 +291,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct iwl_station_entry *station;
int max_sta = priv->hw_params.max_stations;
char *buf;
@@ -376,7 +361,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
loff_t *ppos)
{
ssize_t ret;
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0, ofs = 0, buf_size = 0;
const u8 *ptr;
char *buf;
@@ -420,6 +405,24 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
return ret;
}
+static ssize_t iwl_dbgfs_log_event_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char *buf;
+ int pos = 0;
+ ssize_t ret = -ENOMEM;
+
+ ret = pos = priv->cfg->ops->lib->dump_nic_event_log(
+ priv, true, &buf, true);
+ if (buf) {
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ }
+ return ret;
+}
+
static ssize_t iwl_dbgfs_log_event_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -436,7 +439,8 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
if (sscanf(buf, "%d", &event_log_flag) != 1)
return -EFAULT;
if (event_log_flag == 1)
- priv->cfg->ops->lib->dump_nic_event_log(priv, true);
+ priv->cfg->ops->lib->dump_nic_event_log(priv, true,
+ NULL, false);
return count;
}
@@ -446,7 +450,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct ieee80211_channel *channels = NULL;
const struct ieee80211_supported_band *supp_band = NULL;
int pos = 0, i, bufsz = PAGE_SIZE;
@@ -519,7 +523,7 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[512];
int pos = 0;
const size_t bufsz = sizeof(buf);
@@ -567,7 +571,7 @@ static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
int cnt = 0;
char *buf;
@@ -654,7 +658,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0, i;
char buf[256];
const size_t bufsz = sizeof(buf);
@@ -677,7 +681,7 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
char buf[256];
const size_t bufsz = sizeof(buf);
@@ -703,7 +707,7 @@ static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
struct iwl_tt_restriction *restriction;
char buf[100];
@@ -763,7 +767,7 @@ static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[100];
int pos = 0;
const size_t bufsz = sizeof(buf);
@@ -811,7 +815,9 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
priv->power_data.debug_sleep_level_override = value;
+ mutex_lock(&priv->mutex);
iwl_power_update_mode(priv, true);
+ mutex_unlock(&priv->mutex);
return count;
}
@@ -820,7 +826,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[10];
int pos, value;
const size_t bufsz = sizeof(buf);
@@ -838,7 +844,7 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[200];
int pos = 0, i;
const size_t bufsz = sizeof(buf);
@@ -859,7 +865,7 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
}
DEBUGFS_READ_WRITE_FILE_OPS(sram);
-DEBUGFS_WRITE_FILE_OPS(log_event);
+DEBUGFS_READ_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_FILE_OPS(nvm);
DEBUGFS_READ_FILE_OPS(stations);
DEBUGFS_READ_FILE_OPS(channels);
@@ -976,7 +982,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct iwl_tx_queue *txq;
struct iwl_queue *q;
char *buf;
@@ -1022,7 +1028,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct iwl_rx_queue *rxq = &priv->rxq;
char buf[256];
int pos = 0;
@@ -1063,36 +1069,33 @@ static int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf,
return p;
}
+static const char ucode_stats_header[] =
+ "%-32s current accumulative delta max\n";
+static const char ucode_stats_short_format[] =
+ " %-30s %10u\n";
+static const char ucode_stats_format[] =
+ " %-30s %10u %10u %10u %10u\n";
static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
- int bufsz = sizeof(struct statistics_rx_phy) * 20 +
- sizeof(struct statistics_rx_non_phy) * 20 +
- sizeof(struct statistics_rx_ht_phy) * 20 + 400;
+ int bufsz = sizeof(struct statistics_rx_phy) * 40 +
+ sizeof(struct statistics_rx_non_phy) * 40 +
+ sizeof(struct statistics_rx_ht_phy) * 40 + 400;
ssize_t ret;
- struct statistics_rx_phy *ofdm, *accum_ofdm;
- struct statistics_rx_phy *cck, *accum_cck;
+ struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
struct statistics_rx_non_phy *general, *accum_general;
- struct statistics_rx_ht_phy *ht, *accum_ht;
+ struct statistics_rx_non_phy *delta_general, *max_general;
+ struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
if (!iwl_is_alive(priv))
return -EAGAIN;
- /* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
- ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (ret) {
- IWL_ERR(priv,
- "Error sending statistics request: %zd\n", ret);
- return -EAGAIN;
- }
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate Buffer\n");
@@ -1111,264 +1114,401 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
accum_cck = &priv->accum_statistics.rx.cck;
accum_general = &priv->accum_statistics.rx.general;
accum_ht = &priv->accum_statistics.rx.ofdm_ht;
+ delta_ofdm = &priv->delta_statistics.rx.ofdm;
+ delta_cck = &priv->delta_statistics.rx.cck;
+ delta_general = &priv->delta_statistics.rx.general;
+ delta_ht = &priv->delta_statistics.rx.ofdm_ht;
+ max_ofdm = &priv->max_delta.rx.ofdm;
+ max_cck = &priv->max_delta.rx.cck;
+ max_general = &priv->max_delta.rx.general;
+ max_ht = &priv->max_delta.rx.ofdm_ht;
+
pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "overrun_err:\t\t%u\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_Rx - OFDM:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
+ accum_ofdm->ina_cnt,
+ delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_cnt:",
+ le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+ delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "plcp_err:",
+ le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+ delta_ofdm->plcp_err, max_ofdm->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_err:",
+ le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+ delta_ofdm->crc32_err, max_ofdm->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "overrun_err:",
le32_to_cpu(ofdm->overrun_err),
- accum_ofdm->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "early_overrun_err:\t%u\t\t\t%u\n",
+ accum_ofdm->overrun_err,
+ delta_ofdm->overrun_err, max_ofdm->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "early_overrun_err:",
le32_to_cpu(ofdm->early_overrun_err),
- accum_ofdm->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
+ accum_ofdm->early_overrun_err,
+ delta_ofdm->early_overrun_err,
+ max_ofdm->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_good:",
le32_to_cpu(ofdm->crc32_good),
- accum_ofdm->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- "false_alarm_cnt:\t%u\t\t\t%u\n",
+ accum_ofdm->crc32_good,
+ delta_ofdm->crc32_good, max_ofdm->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "false_alarm_cnt:",
le32_to_cpu(ofdm->false_alarm_cnt),
- accum_ofdm->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "fina_sync_err_cnt:\t%u\t\t\t%u\n",
+ accum_ofdm->false_alarm_cnt,
+ delta_ofdm->false_alarm_cnt,
+ max_ofdm->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_sync_err_cnt:",
le32_to_cpu(ofdm->fina_sync_err_cnt),
- accum_ofdm->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sfd_timeout:\t\t%u\t\t\t%u\n",
+ accum_ofdm->fina_sync_err_cnt,
+ delta_ofdm->fina_sync_err_cnt,
+ max_ofdm->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sfd_timeout:",
le32_to_cpu(ofdm->sfd_timeout),
- accum_ofdm->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "fina_timeout:\t\t%u\t\t\t%u\n",
+ accum_ofdm->sfd_timeout,
+ delta_ofdm->sfd_timeout,
+ max_ofdm->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_timeout:",
le32_to_cpu(ofdm->fina_timeout),
- accum_ofdm->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "unresponded_rts:\t%u\t\t\t%u\n",
+ accum_ofdm->fina_timeout,
+ delta_ofdm->fina_timeout,
+ max_ofdm->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "unresponded_rts:",
le32_to_cpu(ofdm->unresponded_rts),
- accum_ofdm->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- "rxe_frame_lmt_ovrun:\t%u\t\t\t%u\n",
+ accum_ofdm->unresponded_rts,
+ delta_ofdm->unresponded_rts,
+ max_ofdm->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "rxe_frame_lmt_ovrun:",
le32_to_cpu(ofdm->rxe_frame_limit_overrun),
- accum_ofdm->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_ack_cnt:\t\t%u\t\t\t%u\n",
+ accum_ofdm->rxe_frame_limit_overrun,
+ delta_ofdm->rxe_frame_limit_overrun,
+ max_ofdm->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_ack_cnt:",
le32_to_cpu(ofdm->sent_ack_cnt),
- accum_ofdm->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_cts_cnt:\t\t%u\t\t\t%u\n",
+ accum_ofdm->sent_ack_cnt,
+ delta_ofdm->sent_ack_cnt,
+ max_ofdm->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_cts_cnt:",
le32_to_cpu(ofdm->sent_cts_cnt),
- accum_ofdm->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_ba_rsp_cnt:\t%u\t\t\t%u\n",
+ accum_ofdm->sent_cts_cnt,
+ delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_ba_rsp_cnt:",
le32_to_cpu(ofdm->sent_ba_rsp_cnt),
- accum_ofdm->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "dsp_self_kill:\t\t%u\t\t\t%u\n",
+ accum_ofdm->sent_ba_rsp_cnt,
+ delta_ofdm->sent_ba_rsp_cnt,
+ max_ofdm->sent_ba_rsp_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "dsp_self_kill:",
le32_to_cpu(ofdm->dsp_self_kill),
- accum_ofdm->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- "mh_format_err:\t\t%u\t\t\t%u\n",
+ accum_ofdm->dsp_self_kill,
+ delta_ofdm->dsp_self_kill,
+ max_ofdm->dsp_self_kill);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "mh_format_err:",
le32_to_cpu(ofdm->mh_format_err),
- accum_ofdm->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "re_acq_main_rssi_sum:\t%u\t\t\t%u\n",
+ accum_ofdm->mh_format_err,
+ delta_ofdm->mh_format_err,
+ max_ofdm->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "re_acq_main_rssi_sum:",
le32_to_cpu(ofdm->re_acq_main_rssi_sum),
- accum_ofdm->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - CCK:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt:\t\t%u\t\t\t%u\n",
- le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt:\t\t%u\t\t\t%u\n",
- le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(cck->plcp_err), accum_cck->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(cck->crc32_err), accum_cck->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "overrun_err:\t\t%u\t\t\t%u\n",
+ accum_ofdm->re_acq_main_rssi_sum,
+ delta_ofdm->re_acq_main_rssi_sum,
+ max_ofdm->re_acq_main_rssi_sum);
+
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_Rx - CCK:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "ina_cnt:",
+ le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+ delta_cck->ina_cnt, max_cck->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_cnt:",
+ le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+ delta_cck->fina_cnt, max_cck->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "plcp_err:",
+ le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+ delta_cck->plcp_err, max_cck->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_err:",
+ le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+ delta_cck->crc32_err, max_cck->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "overrun_err:",
le32_to_cpu(cck->overrun_err),
- accum_cck->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "early_overrun_err:\t%u\t\t\t%u\n",
+ accum_cck->overrun_err,
+ delta_cck->overrun_err, max_cck->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "early_overrun_err:",
le32_to_cpu(cck->early_overrun_err),
- accum_cck->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
- le32_to_cpu(cck->crc32_good), accum_cck->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- "false_alarm_cnt:\t%u\t\t\t%u\n",
+ accum_cck->early_overrun_err,
+ delta_cck->early_overrun_err,
+ max_cck->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_good:",
+ le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+ delta_cck->crc32_good,
+ max_cck->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "false_alarm_cnt:",
le32_to_cpu(cck->false_alarm_cnt),
- accum_cck->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "fina_sync_err_cnt:\t%u\t\t\t%u\n",
+ accum_cck->false_alarm_cnt,
+ delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_sync_err_cnt:",
le32_to_cpu(cck->fina_sync_err_cnt),
- accum_cck->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sfd_timeout:\t\t%u\t\t\t%u\n",
+ accum_cck->fina_sync_err_cnt,
+ delta_cck->fina_sync_err_cnt,
+ max_cck->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sfd_timeout:",
le32_to_cpu(cck->sfd_timeout),
- accum_cck->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "fina_timeout:\t\t%u\t\t\t%u\n",
+ accum_cck->sfd_timeout,
+ delta_cck->sfd_timeout, max_cck->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_timeout:",
le32_to_cpu(cck->fina_timeout),
- accum_cck->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "unresponded_rts:\t%u\t\t\t%u\n",
+ accum_cck->fina_timeout,
+ delta_cck->fina_timeout, max_cck->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "unresponded_rts:",
le32_to_cpu(cck->unresponded_rts),
- accum_cck->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- "rxe_frame_lmt_ovrun:\t%u\t\t\t%u\n",
+ accum_cck->unresponded_rts,
+ delta_cck->unresponded_rts,
+ max_cck->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "rxe_frame_lmt_ovrun:",
le32_to_cpu(cck->rxe_frame_limit_overrun),
- accum_cck->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_ack_cnt:\t\t%u\t\t\t%u\n",
+ accum_cck->rxe_frame_limit_overrun,
+ delta_cck->rxe_frame_limit_overrun,
+ max_cck->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_ack_cnt:",
le32_to_cpu(cck->sent_ack_cnt),
- accum_cck->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_cts_cnt:\t\t%u\t\t\t%u\n",
+ accum_cck->sent_ack_cnt,
+ delta_cck->sent_ack_cnt,
+ max_cck->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_cts_cnt:",
le32_to_cpu(cck->sent_cts_cnt),
- accum_cck->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_ba_rsp_cnt:\t%u\t\t\t%u\n",
+ accum_cck->sent_cts_cnt,
+ delta_cck->sent_cts_cnt,
+ max_cck->sent_cts_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_ba_rsp_cnt:",
le32_to_cpu(cck->sent_ba_rsp_cnt),
- accum_cck->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "dsp_self_kill:\t\t%u\t\t\t%u\n",
+ accum_cck->sent_ba_rsp_cnt,
+ delta_cck->sent_ba_rsp_cnt,
+ max_cck->sent_ba_rsp_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "dsp_self_kill:",
le32_to_cpu(cck->dsp_self_kill),
- accum_cck->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- "mh_format_err:\t\t%u\t\t\t%u\n",
+ accum_cck->dsp_self_kill,
+ delta_cck->dsp_self_kill,
+ max_cck->dsp_self_kill);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "mh_format_err:",
le32_to_cpu(cck->mh_format_err),
- accum_cck->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "re_acq_main_rssi_sum:\t%u\t\t\t%u\n",
+ accum_cck->mh_format_err,
+ delta_cck->mh_format_err, max_cck->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "re_acq_main_rssi_sum:",
le32_to_cpu(cck->re_acq_main_rssi_sum),
- accum_cck->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - GENERAL:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "bogus_cts:\t\t%u\t\t\t%u\n",
+ accum_cck->re_acq_main_rssi_sum,
+ delta_cck->re_acq_main_rssi_sum,
+ max_cck->re_acq_main_rssi_sum);
+
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_Rx - GENERAL:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "bogus_cts:",
le32_to_cpu(general->bogus_cts),
- accum_general->bogus_cts);
- pos += scnprintf(buf + pos, bufsz - pos, "bogus_ack:\t\t%u\t\t\t%u\n",
+ accum_general->bogus_cts,
+ delta_general->bogus_cts, max_general->bogus_cts);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "bogus_ack:",
le32_to_cpu(general->bogus_ack),
- accum_general->bogus_ack);
- pos += scnprintf(buf + pos, bufsz - pos,
- "non_bssid_frames:\t%u\t\t\t%u\n",
+ accum_general->bogus_ack,
+ delta_general->bogus_ack, max_general->bogus_ack);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "non_bssid_frames:",
le32_to_cpu(general->non_bssid_frames),
- accum_general->non_bssid_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- "filtered_frames:\t%u\t\t\t%u\n",
+ accum_general->non_bssid_frames,
+ delta_general->non_bssid_frames,
+ max_general->non_bssid_frames);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "filtered_frames:",
le32_to_cpu(general->filtered_frames),
- accum_general->filtered_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- "non_channel_beacons:\t%u\t\t\t%u\n",
+ accum_general->filtered_frames,
+ delta_general->filtered_frames,
+ max_general->filtered_frames);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "non_channel_beacons:",
le32_to_cpu(general->non_channel_beacons),
- accum_general->non_channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos,
- "channel_beacons:\t%u\t\t\t%u\n",
+ accum_general->non_channel_beacons,
+ delta_general->non_channel_beacons,
+ max_general->non_channel_beacons);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "channel_beacons:",
le32_to_cpu(general->channel_beacons),
- accum_general->channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos,
- "num_missed_bcon:\t%u\t\t\t%u\n",
+ accum_general->channel_beacons,
+ delta_general->channel_beacons,
+ max_general->channel_beacons);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "num_missed_bcon:",
le32_to_cpu(general->num_missed_bcon),
- accum_general->num_missed_bcon);
- pos += scnprintf(buf + pos, bufsz - pos,
- "adc_rx_saturation_time:\t%u\t\t\t%u\n",
+ accum_general->num_missed_bcon,
+ delta_general->num_missed_bcon,
+ max_general->num_missed_bcon);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "adc_rx_saturation_time:",
le32_to_cpu(general->adc_rx_saturation_time),
- accum_general->adc_rx_saturation_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- "ina_detect_search_tm:\t%u\t\t\t%u\n",
+ accum_general->adc_rx_saturation_time,
+ delta_general->adc_rx_saturation_time,
+ max_general->adc_rx_saturation_time);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "ina_detect_search_tm:",
le32_to_cpu(general->ina_detection_search_time),
- accum_general->ina_detection_search_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_silence_rssi_a:\t%u\t\t\t%u\n",
+ accum_general->ina_detection_search_time,
+ delta_general->ina_detection_search_time,
+ max_general->ina_detection_search_time);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_silence_rssi_a:",
le32_to_cpu(general->beacon_silence_rssi_a),
- accum_general->beacon_silence_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_silence_rssi_b:\t%u\t\t\t%u\n",
+ accum_general->beacon_silence_rssi_a,
+ delta_general->beacon_silence_rssi_a,
+ max_general->beacon_silence_rssi_a);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_silence_rssi_b:",
le32_to_cpu(general->beacon_silence_rssi_b),
- accum_general->beacon_silence_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_silence_rssi_c:\t%u\t\t\t%u\n",
+ accum_general->beacon_silence_rssi_b,
+ delta_general->beacon_silence_rssi_b,
+ max_general->beacon_silence_rssi_b);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_silence_rssi_c:",
le32_to_cpu(general->beacon_silence_rssi_c),
- accum_general->beacon_silence_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos,
- "interference_data_flag:\t%u\t\t\t%u\n",
+ accum_general->beacon_silence_rssi_c,
+ delta_general->beacon_silence_rssi_c,
+ max_general->beacon_silence_rssi_c);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "interference_data_flag:",
le32_to_cpu(general->interference_data_flag),
- accum_general->interference_data_flag);
- pos += scnprintf(buf + pos, bufsz - pos,
- "channel_load:\t\t%u\t\t\t%u\n",
+ accum_general->interference_data_flag,
+ delta_general->interference_data_flag,
+ max_general->interference_data_flag);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "channel_load:",
le32_to_cpu(general->channel_load),
- accum_general->channel_load);
- pos += scnprintf(buf + pos, bufsz - pos,
- "dsp_false_alarms:\t%u\t\t\t%u\n",
+ accum_general->channel_load,
+ delta_general->channel_load,
+ max_general->channel_load);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "dsp_false_alarms:",
le32_to_cpu(general->dsp_false_alarms),
- accum_general->dsp_false_alarms);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_rssi_a:\t\t%u\t\t\t%u\n",
+ accum_general->dsp_false_alarms,
+ delta_general->dsp_false_alarms,
+ max_general->dsp_false_alarms);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_rssi_a:",
le32_to_cpu(general->beacon_rssi_a),
- accum_general->beacon_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_rssi_b:\t\t%u\t\t\t%u\n",
+ accum_general->beacon_rssi_a,
+ delta_general->beacon_rssi_a,
+ max_general->beacon_rssi_a);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_rssi_b:",
le32_to_cpu(general->beacon_rssi_b),
- accum_general->beacon_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_rssi_c:\t\t%u\t\t\t%u\n",
+ accum_general->beacon_rssi_b,
+ delta_general->beacon_rssi_b,
+ max_general->beacon_rssi_b);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_rssi_c:",
le32_to_cpu(general->beacon_rssi_c),
- accum_general->beacon_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_energy_a:\t%u\t\t\t%u\n",
+ accum_general->beacon_rssi_c,
+ delta_general->beacon_rssi_c,
+ max_general->beacon_rssi_c);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_energy_a:",
le32_to_cpu(general->beacon_energy_a),
- accum_general->beacon_energy_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_energy_b:\t%u\t\t\t%u\n",
+ accum_general->beacon_energy_a,
+ delta_general->beacon_energy_a,
+ max_general->beacon_energy_a);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_energy_b:",
le32_to_cpu(general->beacon_energy_b),
- accum_general->beacon_energy_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_energy_c:\t%u\t\t\t%u\n",
+ accum_general->beacon_energy_b,
+ delta_general->beacon_energy_b,
+ max_general->beacon_energy_b);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_energy_c:",
le32_to_cpu(general->beacon_energy_c),
- accum_general->beacon_energy_c);
+ accum_general->beacon_energy_c,
+ delta_general->beacon_energy_c,
+ max_general->beacon_energy_c);
pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ht->plcp_err), accum_ht->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "overrun_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ht->overrun_err), accum_ht->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "early_overrun_err:\t%u\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_Rx - OFDM_HT:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "plcp_err:",
+ le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
+ delta_ht->plcp_err, max_ht->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "overrun_err:",
+ le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
+ delta_ht->overrun_err, max_ht->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "early_overrun_err:",
le32_to_cpu(ht->early_overrun_err),
- accum_ht->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ht->crc32_good), accum_ht->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ht->crc32_err), accum_ht->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "mh_format_err:\t\t%u\t\t\t%u\n",
+ accum_ht->early_overrun_err,
+ delta_ht->early_overrun_err,
+ max_ht->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_good:",
+ le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
+ delta_ht->crc32_good, max_ht->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_err:",
+ le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
+ delta_ht->crc32_err, max_ht->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "mh_format_err:",
le32_to_cpu(ht->mh_format_err),
- accum_ht->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg_crc32_good:\t\t%u\t\t\t%u\n",
+ accum_ht->mh_format_err,
+ delta_ht->mh_format_err, max_ht->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg_crc32_good:",
le32_to_cpu(ht->agg_crc32_good),
- accum_ht->agg_crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg_mpdu_cnt:\t\t%u\t\t\t%u\n",
+ accum_ht->agg_crc32_good,
+ delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg_mpdu_cnt:",
le32_to_cpu(ht->agg_mpdu_cnt),
- accum_ht->agg_mpdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "agg_cnt:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt);
+ accum_ht->agg_mpdu_cnt,
+ delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg_cnt:",
+ le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
+ delta_ht->agg_cnt, max_ht->agg_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "unsupport_mcs:",
+ le32_to_cpu(ht->unsupport_mcs),
+ accum_ht->unsupport_mcs,
+ delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
@@ -1379,26 +1519,16 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
- int bufsz = (sizeof(struct statistics_tx) * 24) + 250;
+ int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
ssize_t ret;
- struct statistics_tx *tx, *accum_tx;
+ struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
if (!iwl_is_alive(priv))
return -EAGAIN;
- /* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
- ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (ret) {
- IWL_ERR(priv,
- "Error sending statistics request: %zd\n", ret);
- return -EAGAIN;
- }
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate Buffer\n");
@@ -1411,106 +1541,148 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
*/
tx = &priv->statistics.tx;
accum_tx = &priv->accum_statistics.tx;
+ delta_tx = &priv->delta_statistics.tx;
+ max_tx = &priv->max_delta.tx;
pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Tx:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "preamble:\t\t\t%u\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_Tx:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "preamble:",
le32_to_cpu(tx->preamble_cnt),
- accum_tx->preamble_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "rx_detected_cnt:\t\t%u\t\t\t%u\n",
+ accum_tx->preamble_cnt,
+ delta_tx->preamble_cnt, max_tx->preamble_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "rx_detected_cnt:",
le32_to_cpu(tx->rx_detected_cnt),
- accum_tx->rx_detected_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "bt_prio_defer_cnt:\t\t%u\t\t\t%u\n",
+ accum_tx->rx_detected_cnt,
+ delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "bt_prio_defer_cnt:",
le32_to_cpu(tx->bt_prio_defer_cnt),
- accum_tx->bt_prio_defer_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "bt_prio_kill_cnt:\t\t%u\t\t\t%u\n",
+ accum_tx->bt_prio_defer_cnt,
+ delta_tx->bt_prio_defer_cnt,
+ max_tx->bt_prio_defer_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "bt_prio_kill_cnt:",
le32_to_cpu(tx->bt_prio_kill_cnt),
- accum_tx->bt_prio_kill_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "few_bytes_cnt:\t\t\t%u\t\t\t%u\n",
+ accum_tx->bt_prio_kill_cnt,
+ delta_tx->bt_prio_kill_cnt,
+ max_tx->bt_prio_kill_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "few_bytes_cnt:",
le32_to_cpu(tx->few_bytes_cnt),
- accum_tx->few_bytes_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "cts_timeout:\t\t\t%u\t\t\t%u\n",
- le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "ack_timeout:\t\t\t%u\t\t\t%u\n",
+ accum_tx->few_bytes_cnt,
+ delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "cts_timeout:",
+ le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+ delta_tx->cts_timeout, max_tx->cts_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "ack_timeout:",
le32_to_cpu(tx->ack_timeout),
- accum_tx->ack_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "expected_ack_cnt:\t\t%u\t\t\t%u\n",
+ accum_tx->ack_timeout,
+ delta_tx->ack_timeout, max_tx->ack_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "expected_ack_cnt:",
le32_to_cpu(tx->expected_ack_cnt),
- accum_tx->expected_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "actual_ack_cnt:\t\t\t%u\t\t\t%u\n",
+ accum_tx->expected_ack_cnt,
+ delta_tx->expected_ack_cnt,
+ max_tx->expected_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "actual_ack_cnt:",
le32_to_cpu(tx->actual_ack_cnt),
- accum_tx->actual_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "dump_msdu_cnt:\t\t\t%u\t\t\t%u\n",
+ accum_tx->actual_ack_cnt,
+ delta_tx->actual_ack_cnt,
+ max_tx->actual_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "dump_msdu_cnt:",
le32_to_cpu(tx->dump_msdu_cnt),
- accum_tx->dump_msdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "abort_nxt_frame_mismatch:"
- "\t%u\t\t\t%u\n",
+ accum_tx->dump_msdu_cnt,
+ delta_tx->dump_msdu_cnt,
+ max_tx->dump_msdu_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "abort_nxt_frame_mismatch:",
le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
- accum_tx->burst_abort_next_frame_mismatch_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "abort_missing_nxt_frame:"
- "\t%u\t\t\t%u\n",
+ accum_tx->burst_abort_next_frame_mismatch_cnt,
+ delta_tx->burst_abort_next_frame_mismatch_cnt,
+ max_tx->burst_abort_next_frame_mismatch_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "abort_missing_nxt_frame:",
le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
- accum_tx->burst_abort_missing_next_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "cts_timeout_collision:\t\t%u\t\t\t%u\n",
+ accum_tx->burst_abort_missing_next_frame_cnt,
+ delta_tx->burst_abort_missing_next_frame_cnt,
+ max_tx->burst_abort_missing_next_frame_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "cts_timeout_collision:",
le32_to_cpu(tx->cts_timeout_collision),
- accum_tx->cts_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos,
- "ack_ba_timeout_collision:\t%u\t\t\t%u\n",
+ accum_tx->cts_timeout_collision,
+ delta_tx->cts_timeout_collision,
+ max_tx->cts_timeout_collision);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "ack_ba_timeout_collision:",
le32_to_cpu(tx->ack_or_ba_timeout_collision),
- accum_tx->ack_or_ba_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg ba_timeout:\t\t\t%u\t\t\t%u\n",
+ accum_tx->ack_or_ba_timeout_collision,
+ delta_tx->ack_or_ba_timeout_collision,
+ max_tx->ack_or_ba_timeout_collision);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg ba_timeout:",
le32_to_cpu(tx->agg.ba_timeout),
- accum_tx->agg.ba_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg ba_resched_frames:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.ba_timeout,
+ delta_tx->agg.ba_timeout,
+ max_tx->agg.ba_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg ba_resched_frames:",
le32_to_cpu(tx->agg.ba_reschedule_frames),
- accum_tx->agg.ba_reschedule_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg scd_query_agg_frame:\t%u\t\t\t%u\n",
+ accum_tx->agg.ba_reschedule_frames,
+ delta_tx->agg.ba_reschedule_frames,
+ max_tx->agg.ba_reschedule_frames);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg scd_query_agg_frame:",
le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
- accum_tx->agg.scd_query_agg_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg scd_query_no_agg:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.scd_query_agg_frame_cnt,
+ delta_tx->agg.scd_query_agg_frame_cnt,
+ max_tx->agg.scd_query_agg_frame_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg scd_query_no_agg:",
le32_to_cpu(tx->agg.scd_query_no_agg),
- accum_tx->agg.scd_query_no_agg);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg scd_query_agg:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.scd_query_no_agg,
+ delta_tx->agg.scd_query_no_agg,
+ max_tx->agg.scd_query_no_agg);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg scd_query_agg:",
le32_to_cpu(tx->agg.scd_query_agg),
- accum_tx->agg.scd_query_agg);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg scd_query_mismatch:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.scd_query_agg,
+ delta_tx->agg.scd_query_agg,
+ max_tx->agg.scd_query_agg);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg scd_query_mismatch:",
le32_to_cpu(tx->agg.scd_query_mismatch),
- accum_tx->agg.scd_query_mismatch);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg frame_not_ready:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.scd_query_mismatch,
+ delta_tx->agg.scd_query_mismatch,
+ max_tx->agg.scd_query_mismatch);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg frame_not_ready:",
le32_to_cpu(tx->agg.frame_not_ready),
- accum_tx->agg.frame_not_ready);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg underrun:\t\t\t%u\t\t\t%u\n",
+ accum_tx->agg.frame_not_ready,
+ delta_tx->agg.frame_not_ready,
+ max_tx->agg.frame_not_ready);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg underrun:",
le32_to_cpu(tx->agg.underrun),
- accum_tx->agg.underrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg bt_prio_kill:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.underrun,
+ delta_tx->agg.underrun, max_tx->agg.underrun);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg bt_prio_kill:",
le32_to_cpu(tx->agg.bt_prio_kill),
- accum_tx->agg.bt_prio_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg rx_ba_rsp_cnt:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.bt_prio_kill,
+ delta_tx->agg.bt_prio_kill,
+ max_tx->agg.bt_prio_kill);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg rx_ba_rsp_cnt:",
le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
- accum_tx->agg.rx_ba_rsp_cnt);
+ accum_tx->agg.rx_ba_rsp_cnt,
+ delta_tx->agg.rx_ba_rsp_cnt,
+ max_tx->agg.rx_ba_rsp_cnt);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
@@ -1521,28 +1693,19 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
- int bufsz = sizeof(struct statistics_general) * 4 + 250;
+ int bufsz = sizeof(struct statistics_general) * 10 + 300;
ssize_t ret;
struct statistics_general *general, *accum_general;
- struct statistics_dbg *dbg, *accum_dbg;
- struct statistics_div *div, *accum_div;
+ struct statistics_general *delta_general, *max_general;
+ struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+ struct statistics_div *div, *accum_div, *delta_div, *max_div;
if (!iwl_is_alive(priv))
return -EAGAIN;
- /* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
- ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (ret) {
- IWL_ERR(priv,
- "Error sending statistics request: %zd\n", ret);
- return -EAGAIN;
- }
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate Buffer\n");
@@ -1557,52 +1720,78 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
dbg = &priv->statistics.general.dbg;
div = &priv->statistics.general.div;
accum_general = &priv->accum_statistics.general;
+ delta_general = &priv->delta_statistics.general;
+ max_general = &priv->max_delta.general;
accum_dbg = &priv->accum_statistics.general.dbg;
+ delta_dbg = &priv->delta_statistics.general.dbg;
+ max_dbg = &priv->max_delta.general.dbg;
accum_div = &priv->accum_statistics.general.div;
+ delta_div = &priv->delta_statistics.general.div;
+ max_div = &priv->max_delta.general.div;
pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_General:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "temperature:\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_General:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format,
+ "temperature:",
le32_to_cpu(general->temperature));
- pos += scnprintf(buf + pos, bufsz - pos, "temperature_m:\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format,
+ "temperature_m:",
le32_to_cpu(general->temperature_m));
- pos += scnprintf(buf + pos, bufsz - pos,
- "burst_check:\t\t\t%u\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "burst_check:",
le32_to_cpu(dbg->burst_check),
- accum_dbg->burst_check);
- pos += scnprintf(buf + pos, bufsz - pos,
- "burst_count:\t\t\t%u\t\t\t%u\n",
+ accum_dbg->burst_check,
+ delta_dbg->burst_check, max_dbg->burst_check);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "burst_count:",
le32_to_cpu(dbg->burst_count),
- accum_dbg->burst_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sleep_time:\t\t\t%u\t\t\t%u\n",
+ accum_dbg->burst_count,
+ delta_dbg->burst_count, max_dbg->burst_count);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sleep_time:",
le32_to_cpu(general->sleep_time),
- accum_general->sleep_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- "slots_out:\t\t\t%u\t\t\t%u\n",
+ accum_general->sleep_time,
+ delta_general->sleep_time, max_general->sleep_time);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "slots_out:",
le32_to_cpu(general->slots_out),
- accum_general->slots_out);
- pos += scnprintf(buf + pos, bufsz - pos,
- "slots_idle:\t\t\t%u\t\t\t%u\n",
+ accum_general->slots_out,
+ delta_general->slots_out, max_general->slots_out);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "slots_idle:",
le32_to_cpu(general->slots_idle),
- accum_general->slots_idle);
+ accum_general->slots_idle,
+ delta_general->slots_idle, max_general->slots_idle);
pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
le32_to_cpu(general->ttl_timestamp));
- pos += scnprintf(buf + pos, bufsz - pos, "tx_on_a:\t\t\t%u\t\t\t%u\n",
- le32_to_cpu(div->tx_on_a), accum_div->tx_on_a);
- pos += scnprintf(buf + pos, bufsz - pos, "tx_on_b:\t\t\t%u\t\t\t%u\n",
- le32_to_cpu(div->tx_on_b), accum_div->tx_on_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- "exec_time:\t\t\t%u\t\t\t%u\n",
- le32_to_cpu(div->exec_time), accum_div->exec_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- "probe_time:\t\t\t%u\t\t\t%u\n",
- le32_to_cpu(div->probe_time), accum_div->probe_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- "rx_enable_counter:\t\t%u\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "tx_on_a:",
+ le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+ delta_div->tx_on_a, max_div->tx_on_a);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "tx_on_b:",
+ le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+ delta_div->tx_on_b, max_div->tx_on_b);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "exec_time:",
+ le32_to_cpu(div->exec_time), accum_div->exec_time,
+ delta_div->exec_time, max_div->exec_time);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "probe_time:",
+ le32_to_cpu(div->probe_time), accum_div->probe_time,
+ delta_div->probe_time, max_div->probe_time);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "rx_enable_counter:",
le32_to_cpu(general->rx_enable_counter),
- accum_general->rx_enable_counter);
+ accum_general->rx_enable_counter,
+ delta_general->rx_enable_counter,
+ max_general->rx_enable_counter);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "num_of_sos_states:",
+ le32_to_cpu(general->num_of_sos_states),
+ accum_general->num_of_sos_states,
+ delta_general->num_of_sos_states,
+ max_general->num_of_sos_states);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
@@ -1612,7 +1801,7 @@ static ssize_t iwl_dbgfs_sensitivity_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
int cnt = 0;
char *buf;
@@ -1693,7 +1882,7 @@ static ssize_t iwl_dbgfs_chain_noise_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
int cnt = 0;
char *buf;
@@ -1751,26 +1940,15 @@ static ssize_t iwl_dbgfs_tx_power_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[128];
int pos = 0;
- ssize_t ret;
const size_t bufsz = sizeof(buf);
struct statistics_tx *tx;
if (!iwl_is_alive(priv))
pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
else {
- /* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
- ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (ret) {
- IWL_ERR(priv, "Error sending statistics request: %zd\n",
- ret);
- return -EAGAIN;
- }
tx = &priv->statistics.tx;
if (tx->tx_power.ant_a ||
tx->tx_power.ant_b ||
@@ -1802,7 +1980,7 @@ static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[60];
int pos = 0;
const size_t bufsz = sizeof(buf);
@@ -1845,6 +2023,206 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
return count;
}
+static ssize_t iwl_dbgfs_csr_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int csr;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &csr) != 1)
+ return -EFAULT;
+
+ if (priv->cfg->ops->lib->dump_csr)
+ priv->cfg->ops->lib->dump_csr(priv);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char buf[128];
+ const size_t bufsz = sizeof(buf);
+ ssize_t ret;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
+ priv->event_log.ucode_trace ? "On" : "Off");
+ pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n",
+ priv->event_log.non_wraps_count);
+ pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n",
+ priv->event_log.wraps_once_count);
+ pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
+ priv->event_log.wraps_more_count);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int trace;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &trace) != 1)
+ return -EFAULT;
+
+ if (trace) {
+ priv->event_log.ucode_trace = true;
+ /* schedule the ucode timer to occur in UCODE_TRACE_PERIOD */
+ mod_timer(&priv->ucode_trace,
+ jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
+ } else {
+ priv->event_log.ucode_trace = false;
+ del_timer_sync(&priv->ucode_trace);
+ }
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char *buf;
+ int pos = 0;
+ ssize_t ret = -EFAULT;
+
+ if (priv->cfg->ops->lib->dump_fh) {
+ ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
+ if (buf) {
+ ret = simple_read_from_buffer(user_buf,
+ count, ppos, buf, pos);
+ kfree(buf);
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char buf[12];
+ const size_t bufsz = sizeof(buf);
+ ssize_t ret;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
+ priv->missed_beacon_threshold);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int missed;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &missed) != 1)
+ return -EINVAL;
+
+ if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
+ missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
+ priv->missed_beacon_threshold =
+ IWL_MISSED_BEACON_THRESHOLD_DEF;
+ else
+ priv->missed_beacon_threshold = missed;
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_internal_scan_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int scan;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &scan) != 1)
+ return -EINVAL;
+
+ iwl_internal_short_hw_scan(priv);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char buf[12];
+ const size_t bufsz = sizeof(buf);
+ ssize_t ret;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
+ priv->cfg->plcp_delta_threshold);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int plcp;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &plcp) != 1)
+ return -EINVAL;
+ if ((plcp <= IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
+ (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
+ priv->cfg->plcp_delta_threshold =
+ IWL_MAX_PLCP_ERR_THRESHOLD_DEF;
+ else
+ priv->cfg->plcp_delta_threshold = plcp;
+ return count;
+}
+
DEBUGFS_READ_FILE_OPS(rx_statistics);
DEBUGFS_READ_FILE_OPS(tx_statistics);
DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
@@ -1859,6 +2237,12 @@ DEBUGFS_READ_FILE_OPS(tx_power);
DEBUGFS_READ_FILE_OPS(power_save_status);
DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
+DEBUGFS_WRITE_FILE_OPS(csr);
+DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
+DEBUGFS_WRITE_FILE_OPS(internal_scan);
+DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
/*
* Create the debugfs files and directories
@@ -1866,69 +2250,73 @@ DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
*/
int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
{
- struct iwl_debugfs *dbgfs;
struct dentry *phyd = priv->hw->wiphy->debugfsdir;
- int ret = 0;
+ struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
- dbgfs = kzalloc(sizeof(struct iwl_debugfs), GFP_KERNEL);
- if (!dbgfs) {
- ret = -ENOMEM;
- goto err;
- }
+ dir_drv = debugfs_create_dir(name, phyd);
+ if (!dir_drv)
+ return -ENOMEM;
+
+ priv->debugfs_dir = dir_drv;
- priv->dbgfs = dbgfs;
- dbgfs->name = name;
- dbgfs->dir_drv = debugfs_create_dir(name, phyd);
- if (!dbgfs->dir_drv || IS_ERR(dbgfs->dir_drv)) {
- ret = -ENOENT;
+ dir_data = debugfs_create_dir("data", dir_drv);
+ if (!dir_data)
+ goto err;
+ dir_rf = debugfs_create_dir("rf", dir_drv);
+ if (!dir_rf)
+ goto err;
+ dir_debug = debugfs_create_dir("debug", dir_drv);
+ if (!dir_debug)
goto err;
- }
- DEBUGFS_ADD_DIR(data, dbgfs->dir_drv);
- DEBUGFS_ADD_DIR(rf, dbgfs->dir_drv);
- DEBUGFS_ADD_DIR(debug, dbgfs->dir_drv);
- DEBUGFS_ADD_FILE(nvm, data, S_IRUSR);
- DEBUGFS_ADD_FILE(sram, data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(log_event, data, S_IWUSR);
- DEBUGFS_ADD_FILE(stations, data, S_IRUSR);
- DEBUGFS_ADD_FILE(channels, data, S_IRUSR);
- DEBUGFS_ADD_FILE(status, data, S_IRUSR);
- DEBUGFS_ADD_FILE(interrupt, data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(qos, data, S_IRUSR);
- DEBUGFS_ADD_FILE(led, data, S_IRUSR);
- DEBUGFS_ADD_FILE(sleep_level_override, data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(current_sleep_command, data, S_IRUSR);
- DEBUGFS_ADD_FILE(thermal_throttling, data, S_IRUSR);
- DEBUGFS_ADD_FILE(disable_ht40, data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(rx_statistics, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_statistics, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(traffic_log, debug, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(rx_queue, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_queue, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_power, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(power_save_status, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(clear_ucode_statistics, debug, S_IWUSR);
- DEBUGFS_ADD_FILE(clear_traffic_statistics, debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_power, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(csr, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(internal_scan, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
- DEBUGFS_ADD_FILE(ucode_rx_stats, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_tx_stats, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_general_stats, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(sensitivity, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(chain_noise, debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
}
- DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal);
- DEBUGFS_ADD_BOOL(disable_chain_noise, rf,
+ DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, &priv->disable_sens_cal);
+ DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
&priv->disable_chain_noise_cal);
if (((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) ||
((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_3945))
- DEBUGFS_ADD_BOOL(disable_tx_power, rf,
+ DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
&priv->disable_tx_power_cal);
return 0;
err:
- IWL_ERR(priv, "Can't open the debugfs directory\n");
+ IWL_ERR(priv, "Can't create the debugfs directory\n");
iwl_dbgfs_unregister(priv);
- return ret;
+ return -ENOMEM;
}
EXPORT_SYMBOL(iwl_dbgfs_register);
@@ -1938,56 +2326,11 @@ EXPORT_SYMBOL(iwl_dbgfs_register);
*/
void iwl_dbgfs_unregister(struct iwl_priv *priv)
{
- if (!priv->dbgfs)
+ if (!priv->debugfs_dir)
return;
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_sleep_level_override);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_current_sleep_command);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_nvm);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_sram);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_log_event);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_channels);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_status);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_interrupt);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_qos);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_led);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_thermal_throttling);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_disable_ht40);
- DEBUGFS_REMOVE(priv->dbgfs->dir_data);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_rx_statistics);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_statistics);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_traffic_log);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_rx_queue);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_queue);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_power);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_power_save_status);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_clear_ucode_statistics);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_clear_traffic_statistics);
- if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_ucode_rx_stats);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_ucode_tx_stats);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_ucode_general_stats);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_sensitivity);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_chain_noise);
- }
- DEBUGFS_REMOVE(priv->dbgfs->dir_debug);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_chain_noise);
- if (((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) ||
- ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_3945))
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_tx_power);
- DEBUGFS_REMOVE(priv->dbgfs->dir_rf);
- DEBUGFS_REMOVE(priv->dbgfs->dir_drv);
- kfree(priv->dbgfs);
- priv->dbgfs = NULL;
+ debugfs_remove_recursive(priv->debugfs_dir);
+ priv->debugfs_dir = NULL;
}
EXPORT_SYMBOL(iwl_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 3822cf53e368..55dc5a866542 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -512,6 +512,7 @@ struct iwl_ht_config {
bool is_ht;
bool is_40mhz;
bool single_chain_sufficient;
+ enum ieee80211_smps_mode smps; /* current smps mode */
/* BSS related data */
u8 extension_chan_offset;
u8 ht_protection;
@@ -984,6 +985,56 @@ struct iwl_switch_rxon {
__le16 channel;
};
+/*
+ * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
+ * to perform continuous uCode event logging operation if enabled
+ */
+#define UCODE_TRACE_PERIOD (100)
+
+/*
+ * iwl_event_log: current uCode event log position
+ *
+ * @ucode_trace: enable/disable ucode continuous trace timer
+ * @num_wraps: how many times the event buffer wraps
+ * @next_entry: the entry just before the next one that uCode would fill
+ * @non_wraps_count: counter for no wrap detected when dumping uCode events
+ * @wraps_once_count: counter for wrap once detected when dumping uCode events
+ * @wraps_more_count: counter for wrap more than once detected
+ * when dumping uCode events
+ */
+struct iwl_event_log {
+ bool ucode_trace;
+ u32 num_wraps;
+ u32 next_entry;
+ int non_wraps_count;
+ int wraps_once_count;
+ int wraps_more_count;
+};
+
+/*
+ * host interrupt timeout value
+ * used with setting interrupt coalescing timer
+ * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
+ *
+ * default interrupt coalescing timer is 64 x 32 = 2048 usecs
+ * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
+ */
+#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
+#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
+#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
+#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
+#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
+#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
+
+/*
+ * This is the threshold value of plcp error rate per 100mSecs. It is
+ * used to set and check for the validity of plcp_delta.
+ */
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (0)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
+#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
+
struct iwl_priv {
/* ieee device used by generic ieee processing code */
@@ -1004,13 +1055,16 @@ struct iwl_priv {
struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
-#if defined(CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT) || defined(CONFIG_IWL3945_SPECTRUM_MEASUREMENT)
/* spectrum measurement report caching */
struct iwl_spectrum_notification measure_report;
u8 measurement_status;
-#endif
+
/* ucode beacon time */
u32 ucode_beacon_time;
+ int missed_beacon_threshold;
+
+ /* storing the jiffies when the plcp error rate is received */
+ unsigned long plcp_jiffies;
/* we allocate array of iwl4965_channel_info for NIC's valid channels.
* Access via channel # using indirect index array */
@@ -1029,14 +1083,15 @@ struct iwl_priv {
struct iwl_calib_result calib_results[IWL_CALIB_MAX];
/* Scan related variables */
- unsigned long last_scan_jiffies;
unsigned long next_scan_jiffies;
unsigned long scan_start;
unsigned long scan_pass_start;
unsigned long scan_start_tsf;
+ unsigned long last_internal_scan_jiffies;
void *scan;
int scan_bands;
struct cfg80211_scan_request *scan_request;
+ bool is_internal_short_scan;
u8 scan_tx_ant[IEEE80211_NUM_BANDS];
u8 mgmt_tx_ant;
@@ -1135,6 +1190,8 @@ struct iwl_priv {
struct iwl_notif_statistics statistics;
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_notif_statistics accum_statistics;
+ struct iwl_notif_statistics delta_statistics;
+ struct iwl_notif_statistics max_delta;
#endif
/* context information */
@@ -1207,15 +1264,10 @@ struct iwl_priv {
struct workqueue_struct *workqueue;
- struct work_struct up;
struct work_struct restart;
- struct work_struct calibrated_work;
struct work_struct scan_completed;
struct work_struct rx_replenish;
struct work_struct abort_scan;
- struct work_struct update_link_led;
- struct work_struct auth_work;
- struct work_struct report_work;
struct work_struct request_scan;
struct work_struct beacon_update;
struct work_struct tt_work;
@@ -1251,7 +1303,8 @@ struct iwl_priv {
u16 rx_traffic_idx;
u8 *tx_traffic;
u8 *rx_traffic;
- struct iwl_debugfs *dbgfs;
+ struct dentry *debugfs_dir;
+ u32 dbgfs_sram_offset, dbgfs_sram_len;
#endif /* CONFIG_IWLWIFI_DEBUGFS */
#endif /* CONFIG_IWLWIFI_DEBUG */
@@ -1261,6 +1314,7 @@ struct iwl_priv {
u32 disable_tx_power_cal;
struct work_struct run_time_calib_work;
struct timer_list statistics_periodic;
+ struct timer_list ucode_trace;
bool hw_ready;
/*For 3945*/
#define IWL_DEFAULT_TX_POWER 0x0F
@@ -1268,6 +1322,8 @@ struct iwl_priv {
struct iwl3945_notif_statistics statistics_39;
u32 sta_supp_rates;
+
+ struct iwl_event_log event_log;
}; /*iwl_priv */
static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
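The new ucode_trace timer_list and iwl_event_log fields suggest a self-rearming timer that polls the uCode event log every UCODE_TRACE_PERIOD milliseconds while continuous logging is enabled. A sketch of that pattern, assuming the pre-timer_setup() API of this kernel generation (setup_timer()/mod_timer() with an unsigned long cookie); the foo_* handler names are illustrative:

#include <linux/timer.h>
#include <linux/jiffies.h>

static void foo_bg_ucode_trace(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	if (!priv->event_log.ucode_trace)
		return;

	/* ... read and trace any new uCode event-log entries here ... */

	/* re-arm for the next polling interval */
	mod_timer(&priv->ucode_trace,
		  jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
}

static void foo_start_ucode_trace(struct iwl_priv *priv)
{
	priv->event_log.ucode_trace = true;
	setup_timer(&priv->ucode_trace, foo_bg_ucode_trace,
		    (unsigned long)priv);
	mod_timer(&priv->ucode_trace,
		  jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
}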
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 83cc4e500a96..36580d8d8b8d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -37,4 +37,6 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event);
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index d9c7363b1bbb..ff4d012ce260 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -91,6 +91,50 @@ TRACE_EVENT(iwlwifi_dev_iowrite32,
);
#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_ucode
+
+TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
+ TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+ TP_ARGS(priv, time, data, ev),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+
+ __field(u32, time)
+ __field(u32, data)
+ __field(u32, ev)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->time = time;
+ __entry->data = data;
+ __entry->ev = ev;
+ ),
+ TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
+ __entry->priv, __entry->time, __entry->data, __entry->ev)
+);
+
+TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
+ TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
+ TP_ARGS(priv, wraps, n_entry, p_entry),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+
+ __field(u32, wraps)
+ __field(u32, n_entry)
+ __field(u32, p_entry)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->wraps = wraps;
+ __entry->n_entry = n_entry;
+ __entry->p_entry = p_entry;
+ ),
+ TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X",
+ __entry->priv, __entry->wraps, __entry->n_entry,
+ __entry->p_entry)
+);
+
+#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi
TRACE_EVENT(iwlwifi_dev_hcmd,
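TRACE_EVENT() generates a trace_<name>() call for each definition, so the two new events are emitted from driver code roughly as below; the foo_* wrappers are illustrative, only the trace_iwlwifi_dev_ucode_* calls come from the header above.

#include "iwl-devtrace.h"

static void foo_trace_cont_event(struct iwl_priv *priv,
				 u32 time, u32 data, u32 ev)
{
	/* one record per uCode event while continuous logging runs */
	trace_iwlwifi_dev_ucode_cont_event(priv, time, data, ev);
}

static void foo_trace_wrap(struct iwl_priv *priv,
			   u32 wraps, u32 n_entry, u32 p_entry)
{
	/* emitted when the uCode event buffer wraps around */
	trace_iwlwifi_dev_ucode_wrap_event(priv, wraps, n_entry, p_entry);
}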
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 4a30969689ff..fd37152abae3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 0cd9c02ee044..4e1ba824dc50 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 65fa8a69fd5a..113c3669b9ce 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -379,6 +379,25 @@
#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
+/**
+ * Bit fields for TSSR(Tx Shared Status & Control) error status register:
+ * 31: Indicates an address error when accessing internal memory
+ * uCode/driver must write "1" in order to clear this flag
+ * 30: Indicates that Host did not send the expected number of dwords to FH
+ * uCode/driver must write "1" in order to clear this flag
+ * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
+ * command was received from the scheduler while the TRB was already full
+ * with a previous command
+ * uCode/driver must write "1" in order to clear this flag
+ * 7-0: Each status bit indicates a channel's TxCredit error. When an error
+ * bit is set, it indicates that the FH has received a full indication
+ * from the RTC TxFIFO and the current value of the TxCredit counter was
+ * not equal to zero. This means that the credit mechanism was not
+ * synchronized to the TxFIFO status
+ * uCode/driver must write "1" in order to clear this flag
+ */
+#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
+
#define FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) ((1 << (_chnl)) << 24)
#define FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl) ((1 << (_chnl)) << 16)
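Since the comment documents write-1-to-clear semantics for the error bits, a driver-side check of the new FH_TSSR_TX_ERROR_REG would look roughly like the sketch below; iwl_read_direct32()/iwl_write_direct32() are the iwl-io.h accessors assumed here, and the helper name is made up.

static void foo_report_fh_tx_errors(struct iwl_priv *priv)
{
	u32 err = iwl_read_direct32(priv, FH_TSSR_TX_ERROR_REG);

	if (!err)
		return;

	IWL_ERR(priv, "FH TSSR Tx error status 0x%08x\n", err);
	/* error bits are acknowledged by writing 1 back to them */
	iwl_write_direct32(priv, FH_TSSR_TX_ERROR_REG, err);
}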
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 30e9ea6d54ec..86783c27d97c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -58,7 +58,6 @@ const char *get_cmd_string(u8 cmd)
IWL_CMD(COEX_PRIORITY_TABLE_CMD);
IWL_CMD(COEX_MEDIUM_NOTIFICATION);
IWL_CMD(COEX_EVENT_CMD);
- IWL_CMD(RADAR_NOTIFICATION);
IWL_CMD(REPLY_QUIET_CMD);
IWL_CMD(REPLY_CHANNEL_SWITCH);
IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index bd0b12efb5c7..45af5bbc1c56 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index e552d4c4bdbe..c719baf2585a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 46c7a95b88f0..a6f9c918aabc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index f47f053f02ea..49a70baa3fb6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 8ccc0bb1d9ed..1a1a9f081cc7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -303,13 +303,12 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
sizeof(struct iwl_powertable_cmd), cmd);
}
-
+/* priv->mutex must be held */
int iwl_power_update_mode(struct iwl_priv *priv, bool force)
{
int ret = 0;
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- bool enabled = (priv->iw_mode == NL80211_IFTYPE_STATION) &&
- (priv->hw->conf.flags & IEEE80211_CONF_PS);
+ bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS;
bool update_chains;
struct iwl_powertable_cmd cmd;
int dtimper;
@@ -319,7 +318,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
if (priv->vif)
- dtimper = priv->vif->bss_conf.dtim_period;
+ dtimper = priv->hw->conf.ps_dtim_period;
else
dtimper = 1;
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index 310c32e8f698..5db91c10dcc8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 6d95832db06d..d2d2a9174900 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 2dbce85404aa..0f718f6df5fd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -473,8 +473,8 @@ int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
(rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
(rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
- /* Set interrupt coalescing timer to 64 x 32 = 2048 usecs */
- iwl_write8(priv, CSR_INT_COALESCING, 0x40);
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
return 0;
}
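For reference, the 8-bit CSR_INT_COALESCING value counts in 32-usec units, so IWL_HOST_INT_TIMEOUT_DEF (0x40 = 64) is the 2048 usecs the new comment mentions. A trivial helper illustrating the conversion (not part of the driver):

#define FOO_INT_COALESCING_USEC_PER_TICK 32	/* CSR_INT_COALESCING unit */

static inline u32 foo_coalescing_to_usec(u8 reg_val)
{
	return (u32)reg_val * FOO_INT_COALESCING_USEC_PER_TICK;
}

/* foo_coalescing_to_usec(IWL_HOST_INT_TIMEOUT_DEF) == 64 * 32 == 2048 usecs */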
@@ -499,9 +499,10 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_missed_beacon_notif *missed_beacon;
missed_beacon = &pkt->u.missed_beacon;
- if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
+ if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+ priv->missed_beacon_threshold) {
IWL_DEBUG_CALIB(priv, "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
- le32_to_cpu(missed_beacon->consequtive_missed_beacons),
+ le32_to_cpu(missed_beacon->consecutive_missed_beacons),
le32_to_cpu(missed_beacon->total_missed_becons),
le32_to_cpu(missed_beacon->num_recvd_beacons),
le32_to_cpu(missed_beacon->num_expected_beacons));
@@ -511,6 +512,24 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_rx_missed_beacon_notif);
+void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
+
+ if (!report->state) {
+ IWL_DEBUG_11H(priv,
+ "Spectrum Measure Notification: Start\n");
+ return;
+ }
+
+ memcpy(&priv->measure_report, report, sizeof(*report));
+ priv->measurement_status |= MEASUREMENT_READY;
+}
+EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);
+
+
/* Calculate noise level, based on measurements during network silence just
* before arriving beacon. This measurement can be done only if we know
@@ -564,15 +583,24 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
int i;
__le32 *prev_stats;
u32 *accum_stats;
+ u32 *delta, *max_delta;
prev_stats = (__le32 *)&priv->statistics;
accum_stats = (u32 *)&priv->accum_statistics;
+ delta = (u32 *)&priv->delta_statistics;
+ max_delta = (u32 *)&priv->max_delta;
for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
- i += sizeof(__le32), stats++, prev_stats++, accum_stats++)
- if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats))
- *accum_stats += (le32_to_cpu(*stats) -
+ i += sizeof(__le32), stats++, prev_stats++, delta++,
+ max_delta++, accum_stats++) {
+ if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+ *delta = (le32_to_cpu(*stats) -
le32_to_cpu(*prev_stats));
+ *accum_stats += *delta;
+ if (*delta > *max_delta)
+ *max_delta = *delta;
+ }
+ }
/* reset accumulative statistics for "no-counter" type statistics */
priv->accum_statistics.general.temperature =
@@ -592,11 +620,15 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
#define REG_RECALIB_PERIOD (60)
+#define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n"
void iwl_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
int change;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ int combined_plcp_delta;
+ unsigned int plcp_msec;
+ unsigned long plcp_received_jiffies;
IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
(int)sizeof(priv->statistics),
@@ -611,6 +643,56 @@ void iwl_rx_statistics(struct iwl_priv *priv,
#ifdef CONFIG_IWLWIFI_DEBUG
iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
#endif
+ /*
+ * check for plcp_err and trigger a radio reset if it exceeds
+ * the plcp error threshold plcp_delta.
+ */
+ plcp_received_jiffies = jiffies;
+ plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
+ (long) priv->plcp_jiffies);
+ priv->plcp_jiffies = plcp_received_jiffies;
+ /*
+ * check to make sure plcp_msec is not 0 to prevent division
+ * by zero.
+ */
+ if (plcp_msec) {
+ combined_plcp_delta =
+ (le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err) -
+ le32_to_cpu(priv->statistics.rx.ofdm.plcp_err)) +
+ (le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err) -
+ le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err));
+
+ if ((combined_plcp_delta > 0) &&
+ ((combined_plcp_delta * 100) / plcp_msec) >
+ priv->cfg->plcp_delta_threshold) {
+ /*
+ * if plcp_err exceeds the threshold, the following
+ * data is printed in csv format:
+ * Text: plcp_err exceeded %d,
+ * Received ofdm.plcp_err,
+ * Current ofdm.plcp_err,
+ * Received ofdm_ht.plcp_err,
+ * Current ofdm_ht.plcp_err,
+ * combined_plcp_delta,
+ * plcp_msec
+ */
+ IWL_DEBUG_RADIO(priv, PLCP_MSG,
+ priv->cfg->plcp_delta_threshold,
+ le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
+ le32_to_cpu(priv->statistics.rx.ofdm.plcp_err),
+ le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
+ le32_to_cpu(
+ priv->statistics.rx.ofdm_ht.plcp_err),
+ combined_plcp_delta, plcp_msec);
+
+ /*
+ * Reset the RF radio due to the high plcp
+ * error rate
+ */
+ iwl_force_rf_reset(priv);
+ }
+ }
+
memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
set_bit(STATUS_STATISTICS, &priv->status);
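Condensed to its arithmetic, the new check scales the combined OFDM and OFDM-HT PLCP error delta to a per-100-ms rate and compares it with cfg->plcp_delta_threshold before forcing the RF reset. A standalone sketch of the same math (hypothetical helper):

static bool foo_plcp_rate_exceeded(u32 ofdm_delta, u32 ofdm_ht_delta,
				   unsigned int msecs, u32 threshold)
{
	u32 combined = ofdm_delta + ofdm_ht_delta;

	if (!msecs || !combined)
		return false;

	/* scale the delta to errors per 100 ms before comparing */
	return (combined * 100) / msecs > threshold;
}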
@@ -638,11 +720,13 @@ void iwl_reply_statistics(struct iwl_priv *priv,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
- memset(&priv->statistics, 0,
- sizeof(struct iwl_notif_statistics));
#ifdef CONFIG_IWLWIFI_DEBUG
memset(&priv->accum_statistics, 0,
sizeof(struct iwl_notif_statistics));
+ memset(&priv->delta_statistics, 0,
+ sizeof(struct iwl_notif_statistics));
+ memset(&priv->max_delta, 0,
+ sizeof(struct iwl_notif_statistics));
#endif
IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
}
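The reworked accumulation loop treats both statistics structures as arrays of 32-bit words and now tracks, per word, the latest delta and the maximum delta seen so far. The same pattern on plain u32 arrays (illustrative reduction, not the driver code):

static void foo_accumulate_stats(const u32 *cur, const u32 *prev,
				 u32 *accum, u32 *delta, u32 *max_delta,
				 size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (cur[i] > prev[i]) {
			delta[i] = cur[i] - prev[i];
			accum[i] += delta[i];
			if (delta[i] > max_delta[i])
				max_delta[i] = delta[i];
		}
	}
}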
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index fa1c89ba6459..f786a407638f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -192,19 +192,17 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
IWL_DEBUG_SCAN(priv, "Scan ch.res: "
"%d [802.11%s] "
"(TSF: 0x%08X:%08X) - %d "
- "elapsed=%lu usec (%dms since last)\n",
+ "elapsed=%lu usec\n",
notif->channel,
notif->band ? "bg" : "a",
le32_to_cpu(notif->tsf_high),
le32_to_cpu(notif->tsf_low),
le32_to_cpu(notif->statistics[0]),
- le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
- jiffies_to_msecs(elapsed_jiffies
- (priv->last_scan_jiffies, jiffies)));
+ le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
#endif
- priv->last_scan_jiffies = jiffies;
- priv->next_scan_jiffies = 0;
+ if (!priv->is_internal_short_scan)
+ priv->next_scan_jiffies = 0;
}
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
@@ -250,8 +248,11 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
goto reschedule;
}
- priv->last_scan_jiffies = jiffies;
- priv->next_scan_jiffies = 0;
+ if (!priv->is_internal_short_scan)
+ priv->next_scan_jiffies = 0;
+ else
+ priv->last_internal_scan_jiffies = jiffies;
+
IWL_DEBUG_INFO(priv, "Setting scan to off\n");
clear_bit(STATUS_SCANNING, &priv->status);
@@ -314,6 +315,72 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_get_passive_dwell_time);
+static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
+ enum ieee80211_band band,
+ struct iwl_scan_channel *scan_ch)
+{
+ const struct ieee80211_supported_band *sband;
+ const struct iwl_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int i, added = 0;
+ u16 channel = 0;
+
+ sband = iwl_get_hw_mode(priv, band);
+ if (!sband) {
+ IWL_ERR(priv, "invalid band\n");
+ return added;
+ }
+
+ active_dwell = iwl_get_active_dwell_time(priv, band, 0);
+ passive_dwell = iwl_get_passive_dwell_time(priv, band);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ /* only scan a single channel, good enough to reset the RF */
+ /* pick the first valid channel that is not currently in use */
+ if (band == IEEE80211_BAND_5GHZ) {
+ for (i = 14; i < priv->channel_count; i++) {
+ if (priv->channel_info[i].channel !=
+ le16_to_cpu(priv->staging_rxon.channel)) {
+ channel = priv->channel_info[i].channel;
+ ch_info = iwl_get_channel_info(priv,
+ band, channel);
+ if (is_channel_valid(ch_info))
+ break;
+ }
+ }
+ } else {
+ for (i = 0; i < 14; i++) {
+ if (priv->channel_info[i].channel !=
+ le16_to_cpu(priv->staging_rxon.channel)) {
+ channel =
+ priv->channel_info[i].channel;
+ ch_info = iwl_get_channel_info(priv,
+ band, channel);
+ if (is_channel_valid(ch_info))
+ break;
+ }
+ }
+ }
+ if (channel) {
+ scan_ch->channel = cpu_to_le16(channel);
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+ added++;
+ } else
+ IWL_ERR(priv, "no valid channel found\n");
+ return added;
+}
+
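The new helper walks the channel table and picks the first valid channel that is not the one currently tuned in staging_rxon. Reduced to a plain array walk it is essentially the sketch below (illustrative names; the validity check is left to the caller here):

static u16 foo_pick_scan_channel(const u16 *channels, int count, u16 in_use)
{
	int i;

	for (i = 0; i < count; i++)
		if (channels[i] != in_use)
			return channels[i];	/* caller still validates it */

	return 0;	/* no usable channel found */
}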
static int iwl_get_channels_for_scan(struct iwl_priv *priv,
enum ieee80211_band band,
u8 is_active, u8 n_probes,
@@ -421,6 +488,7 @@ static int iwl_scan_initiate(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "Starting scan...\n");
set_bit(STATUS_SCANNING, &priv->status);
+ priv->is_internal_short_scan = false;
priv->scan_start = jiffies;
priv->scan_pass_start = priv->scan_start;
@@ -461,15 +529,6 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
goto out_unlock;
}
- /* if we just finished scan ask for delay */
- if (iwl_is_associated(priv) && priv->last_scan_jiffies &&
- time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN, jiffies)) {
- IWL_DEBUG_SCAN(priv, "scan rejected: within previous scan period\n");
- queue_work(priv->workqueue, &priv->scan_completed);
- ret = 0;
- goto out_unlock;
- }
-
priv->scan_bands = 0;
for (i = 0; i < req->n_channels; i++)
priv->scan_bands |= BIT(req->channels[i]->band);
@@ -488,6 +547,54 @@ out_unlock:
}
EXPORT_SYMBOL(iwl_mac_hw_scan);
+/*
+ * Internal short scan: this function should only be called while associated.
+ * It will reset and tune the radio to prevent possible RF related problems.
+ */
+#define IWL_DELAY_NEXT_INTERNAL_SCAN (HZ*1)
+
+int iwl_internal_short_hw_scan(struct iwl_priv *priv)
+{
+ int ret = 0;
+
+ if (!iwl_is_ready_rf(priv)) {
+ ret = -EIO;
+ IWL_DEBUG_SCAN(priv, "not ready or exit pending\n");
+ goto out;
+ }
+ if (test_bit(STATUS_SCANNING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+ if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+ if (priv->last_internal_scan_jiffies &&
+ time_after(priv->last_internal_scan_jiffies +
+ IWL_DELAY_NEXT_INTERNAL_SCAN, jiffies)) {
+ IWL_DEBUG_SCAN(priv, "internal scan rejected\n");
+ goto out;
+ }
+
+ priv->scan_bands = 0;
+ if (priv->band == IEEE80211_BAND_5GHZ)
+ priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
+ else
+ priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
+
+ IWL_DEBUG_SCAN(priv, "Start internal short scan...\n");
+ set_bit(STATUS_SCANNING, &priv->status);
+ priv->is_internal_short_scan = true;
+ queue_work(priv->workqueue, &priv->request_scan);
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(iwl_internal_short_hw_scan);
+
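The rate limiting above is the standard jiffies idiom: remember when the last internal scan ran and refuse a new one while last + IWL_DELAY_NEXT_INTERNAL_SCAN is still in the future. The check in isolation (hypothetical helper):

#include <linux/jiffies.h>

static bool foo_too_soon(unsigned long last, unsigned long interval)
{
	/* 'last' of 0 means no previous run, so never reject in that case */
	return last && time_after(last + interval, jiffies);
}

/* e.g. foo_too_soon(priv->last_internal_scan_jiffies, HZ) */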
#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
void iwl_bg_scan_check(struct work_struct *data)
@@ -544,14 +651,26 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
if (left < 0)
return 0;
*pos++ = WLAN_EID_SSID;
- *pos++ = 0;
-
- len += 2;
+ if (!priv->is_internal_short_scan &&
+ priv->scan_request->n_ssids) {
+ struct cfg80211_ssid *ssid =
+ priv->scan_request->ssids;
+
+ /* Broadcast if ssid_len is 0 */
+ *pos++ = ssid->ssid_len;
+ memcpy(pos, ssid->ssid, ssid->ssid_len);
+ pos += ssid->ssid_len;
+ len += 2 + ssid->ssid_len;
+ } else {
+ *pos++ = 0;
+ len += 2;
+ }
if (WARN_ON(left < ie_len))
return len;
- memcpy(pos, ies, ie_len);
+ if (ies)
+ memcpy(pos, ies, ie_len);
len += ie_len;
left -= ie_len;
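The probe-request change writes the first requested SSID straight into the template as a standard information element (ID, length, payload), falling back to a zero-length wildcard SSID for internal scans. The element layout, as a hypothetical helper:

#include <linux/string.h>
#include <linux/ieee80211.h>

static int foo_put_ssid_ie(u8 *pos, const u8 *ssid, u8 ssid_len)
{
	pos[0] = WLAN_EID_SSID;		/* element ID */
	pos[1] = ssid_len;		/* length, 0 means wildcard SSID */
	memcpy(&pos[2], ssid, ssid_len);

	return 2 + ssid_len;		/* bytes written */
}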
@@ -654,7 +773,6 @@ static void iwl_bg_request_scan(struct work_struct *data)
unsigned long flags;
IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
-
spin_lock_irqsave(&priv->lock, flags);
interval = priv->beacon_int;
spin_unlock_irqrestore(&priv->lock, flags);
@@ -672,21 +790,29 @@ static void iwl_bg_request_scan(struct work_struct *data)
scan_suspend_time, interval);
}
- if (priv->scan_request->n_ssids) {
- int i, p = 0;
+ if (priv->is_internal_short_scan) {
+ IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
+ } else if (priv->scan_request->n_ssids) {
IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- /* always does wildcard anyway */
- if (!priv->scan_request->ssids[i].ssid_len)
- continue;
- scan->direct_scan[p].id = WLAN_EID_SSID;
- scan->direct_scan[p].len =
- priv->scan_request->ssids[i].ssid_len;
- memcpy(scan->direct_scan[p].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- n_probes++;
- p++;
+ /*
+ * The first SSID to scan is stuffed into the probe request
+ * template and the remaining ones are handled through the
+ * direct_scan array.
+ */
+ if (priv->scan_request->n_ssids > 1) {
+ int i, p = 0;
+ for (i = 1; i < priv->scan_request->n_ssids; i++) {
+ if (!priv->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ priv->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ priv->scan_request->ssids[i].ssid,
+ priv->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
}
is_active = true;
} else
@@ -753,24 +879,38 @@ static void iwl_bg_request_scan(struct work_struct *data)
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
scan->rx_chain = cpu_to_le16(rx_chain);
- cmd_len = iwl_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- priv->scan_request->ie,
- priv->scan_request->ie_len,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ if (!priv->is_internal_short_scan) {
+ cmd_len = iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ priv->scan_request->ie,
+ priv->scan_request->ie_len,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ } else {
+ cmd_len = iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ NULL, 0,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ }
scan->tx_cmd.len = cpu_to_le16(cmd_len);
-
if (iwl_is_monitor_mode(priv))
scan->filter_flags = RXON_FILTER_PROMISC_MSK;
scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
RXON_FILTER_BCON_AWARE_MSK);
- scan->channel_count =
- iwl_get_channels_for_scan(priv, band, is_active, n_probes,
- (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
-
+ if (priv->is_internal_short_scan) {
+ scan->channel_count =
+ iwl_get_single_channel_for_scan(priv, band,
+ (void *)&scan->data[le16_to_cpu(
+ scan->tx_cmd.len)]);
+ } else {
+ scan->channel_count =
+ iwl_get_channels_for_scan(priv, band,
+ is_active, n_probes,
+ (void *)&scan->data[le16_to_cpu(
+ scan->tx_cmd.len)]);
+ }
if (scan->channel_count == 0) {
IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
goto done;
@@ -831,7 +971,12 @@ void iwl_bg_scan_completed(struct work_struct *work)
cancel_delayed_work(&priv->scan_check);
- ieee80211_scan_completed(priv->hw, false);
+ if (!priv->is_internal_short_scan)
+ ieee80211_scan_completed(priv->hw, false);
+ else {
+ priv->is_internal_short_scan = false;
+ IWL_DEBUG_SCAN(priv, "internal short scan completed\n");
+ }
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.c b/drivers/net/wireless/iwlwifi/iwl-spectrum.c
deleted file mode 100644
index 1ea5cd345fe8..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/wireless.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-spectrum.h"
-
-#define BEACON_TIME_MASK_LOW 0x00FFFFFF
-#define BEACON_TIME_MASK_HIGH 0xFF000000
-#define TIME_UNIT 1024
-
-/*
- * extended beacon time format
- * time in usec will be changed into a 32-bit value in 8:24 format
- * the high 1 byte is the beacon counts
- * the lower 3 bytes is the time in usec within one beacon interval
- */
-
-/* TOOD: was used in sysfs debug interface need to add to mac */
-#if 0
-static u32 iwl_usecs_to_beacons(u32 usec, u32 beacon_interval)
-{
- u32 quot;
- u32 rem;
- u32 interval = beacon_interval * 1024;
-
- if (!interval || !usec)
- return 0;
-
- quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
- rem = (usec % interval) & BEACON_TIME_MASK_LOW;
-
- return (quot << 24) + rem;
-}
-
-/* base is usually what we get from ucode with each received frame,
- * the same as HW timer counter counting down
- */
-
-static __le32 iwl_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
-{
- u32 base_low = base & BEACON_TIME_MASK_LOW;
- u32 addon_low = addon & BEACON_TIME_MASK_LOW;
- u32 interval = beacon_interval * TIME_UNIT;
- u32 res = (base & BEACON_TIME_MASK_HIGH) +
- (addon & BEACON_TIME_MASK_HIGH);
-
- if (base_low > addon_low)
- res += base_low - addon_low;
- else if (base_low < addon_low) {
- res += interval + base_low - addon_low;
- res += (1 << 24);
- } else
- res += (1 << 24);
-
- return cpu_to_le32(res);
-}
-static int iwl_get_measurement(struct iwl_priv *priv,
- struct ieee80211_measurement_params *params,
- u8 type)
-{
- struct iwl4965_spectrum_cmd spectrum;
- struct iwl_rx_packet *res;
- struct iwl_host_cmd cmd = {
- .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
- .data = (void *)&spectrum,
- .meta.flags = CMD_WANT_SKB,
- };
- u32 add_time = le64_to_cpu(params->start_time);
- int rc;
- int spectrum_resp_status;
- int duration = le16_to_cpu(params->duration);
-
- if (iwl_is_associated(priv))
- add_time =
- iwl_usecs_to_beacons(
- le64_to_cpu(params->start_time) - priv->last_tsf,
- le16_to_cpu(priv->rxon_timing.beacon_interval));
-
- memset(&spectrum, 0, sizeof(spectrum));
-
- spectrum.channel_count = cpu_to_le16(1);
- spectrum.flags =
- RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
- spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
- cmd.len = sizeof(spectrum);
- spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
-
- if (iwl_is_associated(priv))
- spectrum.start_time =
- iwl_add_beacon_time(priv->last_beacon_time,
- add_time,
- le16_to_cpu(priv->rxon_timing.beacon_interval));
- else
- spectrum.start_time = 0;
-
- spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
- spectrum.channels[0].channel = params->channel;
- spectrum.channels[0].type = type;
- if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
- spectrum.flags |= RXON_FLG_BAND_24G_MSK |
- RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
-
- rc = iwl_send_cmd_sync(priv, &cmd);
- if (rc)
- return rc;
-
- res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
- if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
- rc = -EIO;
- }
-
- spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
- switch (spectrum_resp_status) {
- case 0: /* Command will be handled */
- if (res->u.spectrum.id != 0xff) {
- IWL_DEBUG_INFO(priv,
- "Replaced existing measurement: %d\n",
- res->u.spectrum.id);
- priv->measurement_status &= ~MEASUREMENT_READY;
- }
- priv->measurement_status |= MEASUREMENT_ACTIVE;
- rc = 0;
- break;
-
- case 1: /* Command will not be handled */
- rc = -EAGAIN;
- break;
- }
-
- dev_kfree_skb_any(cmd.meta.u.skb);
-
- return rc;
-}
-#endif
-
-static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
-
- if (!report->state) {
- IWL_DEBUG_11H(priv,
- "Spectrum Measure Notification: Start\n");
- return;
- }
-
- memcpy(&priv->measure_report, report, sizeof(*report));
- priv->measurement_status |= MEASUREMENT_READY;
-}
-
-void iwl_setup_spectrum_handlers(struct iwl_priv *priv)
-{
- priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
- iwl_rx_spectrum_measure_notif;
-}
-EXPORT_SYMBOL(iwl_setup_spectrum_handlers);
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.h b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
index a77c1e619062..af6babee2891 100644
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.h
+++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ieee80211 subsystem header files.
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 90fbdb25399e..4a6686fa6b36 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -80,46 +80,103 @@ int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
}
EXPORT_SYMBOL(iwl_get_ra_sta_id);
+/* priv->sta_lock must be held */
static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
- IWL_ERR(priv, "ACTIVATE a non DRIVER active station %d\n",
- sta_id);
-
- priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
- IWL_DEBUG_ASSOC(priv, "Added STA to Ucode: %pM\n",
- priv->stations[sta_id].sta.sta.addr);
+ IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u addr %pM\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
+ IWL_DEBUG_ASSOC(priv,
+ "STA id %u addr %pM already present in uCode (according to driver)\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+ } else {
+ priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
+ IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+ }
}
-static void iwl_add_sta_callback(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt)
+static void iwl_process_add_sta_resp(struct iwl_priv *priv,
+ struct iwl_addsta_cmd *addsta,
+ struct iwl_rx_packet *pkt,
+ bool sync)
{
- struct iwl_addsta_cmd *addsta =
- (struct iwl_addsta_cmd *)cmd->cmd.payload;
u8 sta_id = addsta->sta.sta_id;
+ unsigned long flags;
if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
- pkt->hdr.flags);
+ pkt->hdr.flags);
return;
}
+ IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
+ sta_id);
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+
switch (pkt->u.add_sta.status) {
case ADD_STA_SUCCESS_MSK:
+ IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
iwl_sta_ucode_activate(priv, sta_id);
- /* fall through */
+ break;
+ case ADD_STA_NO_ROOM_IN_TABLE:
+ IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
+ sta_id);
+ break;
+ case ADD_STA_NO_BLOCK_ACK_RESOURCE:
+ IWL_ERR(priv, "Adding station %d failed, no block ack resource.\n",
+ sta_id);
+ break;
+ case ADD_STA_MODIFY_NON_EXIST_STA:
+ IWL_ERR(priv, "Attempting to modify non-existing station %d \n",
+ sta_id);
+ break;
default:
- IWL_DEBUG_HC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
- pkt->u.add_sta.status);
+ IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
+ pkt->u.add_sta.status);
break;
}
+
+ IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
+ priv->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+
+ /*
+ * XXX: The MAC address in the command buffer is often changed from
+ * the original sent to the device. That is, the MAC address
+ * written to the command buffer often is not the same MAC address
+ * read from the command buffer when the command returns. This
+ * issue has not yet been resolved and this debugging is left to
+ * observe the problem.
+ */
+ IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
+ priv->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
+ addsta->sta.addr);
+
+ /*
+ * Determine if we wanted to modify or add a station,
+ * if adding a station succeeded we have some more initialization
+ * to do when using station notification. TODO
+ */
+
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+
+static void iwl_add_sta_callback(struct iwl_priv *priv,
+ struct iwl_device_cmd *cmd,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_addsta_cmd *addsta =
+ (struct iwl_addsta_cmd *)cmd->cmd.payload;
+
+ iwl_process_add_sta_resp(priv, addsta, pkt, false);
+
}
int iwl_send_add_sta(struct iwl_priv *priv,
@@ -145,24 +202,9 @@ int iwl_send_add_sta(struct iwl_priv *priv,
if (ret || (flags & CMD_ASYNC))
return ret;
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
- pkt->hdr.flags);
- ret = -EIO;
- }
-
if (ret == 0) {
- switch (pkt->u.add_sta.status) {
- case ADD_STA_SUCCESS_MSK:
- iwl_sta_ucode_activate(priv, sta->sta.sta_id);
- IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
- break;
- default:
- ret = -EIO;
- IWL_WARN(priv, "REPLY_ADD_STA failed\n");
- break;
- }
+ pkt = (struct iwl_rx_packet *)cmd.reply_page;
+ iwl_process_add_sta_resp(priv, sta, pkt, true);
}
iwl_free_pages(priv, cmd.reply_page);
@@ -1003,24 +1045,19 @@ int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
struct ieee80211_sta_ht_cap *cur_ht_config = NULL;
u8 sta_id;
- /* Add station to device's station table */
-
/*
- * XXX: This check is definitely not correct, if we're an AP
- * it'll always be false which is not what we want, but
- * it doesn't look like iwlagn is prepared to be an HT
- * AP anyway.
+ * Set HT capabilities. It is ok to set this struct even if not using
+ * HT config: the priv->current_ht_config.is_ht flag will just be false
*/
- if (priv->current_ht_config.is_ht) {
- rcu_read_lock();
- sta = ieee80211_find_sta(priv->vif, addr);
- if (sta) {
- memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config));
- cur_ht_config = &ht_config;
- }
- rcu_read_unlock();
+ rcu_read_lock();
+ sta = ieee80211_find_sta(priv->vif, addr);
+ if (sta) {
+ memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config));
+ cur_ht_config = &ht_config;
}
+ rcu_read_unlock();
+ /* Add station to device's station table */
sta_id = iwl_add_station(priv, addr, is_ap, CMD_SYNC, cur_ht_config);
/* Set up default rate scaling table in device's station table */
@@ -1085,6 +1122,7 @@ static void iwl_sta_init_bcast_lq(struct iwl_priv *priv)
*/
void iwl_add_bcast_station(struct iwl_priv *priv)
{
+ IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n");
iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
/* Set up default rate scaling table in device's station table */
@@ -1093,6 +1131,16 @@ void iwl_add_bcast_station(struct iwl_priv *priv)
EXPORT_SYMBOL(iwl_add_bcast_station);
/**
+ * iwl3945_add_bcast_station - add broadcast station into station table.
+ */
+void iwl3945_add_bcast_station(struct iwl_priv *priv)
+{
+ IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n");
+ iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
+}
+EXPORT_SYMBOL(iwl3945_add_bcast_station);
+
+/**
* iwl_get_sta_id - Find station's index within station table
*
* If new IBSS station, create new entry in station table
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 8d052de2d405..2dc35fe28f56 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -53,6 +53,7 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
void iwl_add_bcast_station(struct iwl_priv *priv);
+void iwl3945_add_bcast_station(struct iwl_priv *priv);
int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
void iwl_clear_stations_table(struct iwl_priv *priv);
int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 87ce2bd292c7..d365d13e3291 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index f8e4e4b18d02..eac2b9a95711 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -56,6 +56,7 @@
#include "iwl-helpers.h"
#include "iwl-core.h"
#include "iwl-dev.h"
+#include "iwl-spectrum.h"
/*
* module name, copyright, version, etc.
@@ -70,14 +71,13 @@
#define VD
#endif
-#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
-#define VS "s"
-#else
-#define VS
-#endif
-
-#define DRV_VERSION IWLWIFI_VERSION VD VS
-#define DRV_COPYRIGHT "Copyright(c) 2003-2009 Intel Corporation"
+/*
+ * add "s" to indicate spectrum measurement included.
+ * we add it here to be consistent with previous releases in which
+ * this was configurable.
+ */
+#define DRV_VERSION IWLWIFI_VERSION VD "s"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -689,10 +689,6 @@ drop:
return -1;
}
-#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
-
-#include "iwl-spectrum.h"
-
#define BEACON_TIME_MASK_LOW 0x00FFFFFF
#define BEACON_TIME_MASK_HIGH 0xFF000000
#define TIME_UNIT 1024
@@ -819,7 +815,6 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
return rc;
}
-#endif
static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
@@ -962,6 +957,8 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
+ priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
+ iwl_rx_spectrum_measure_notif;
priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
iwl_rx_pm_debug_statistics_notif;
@@ -975,7 +972,6 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_hw_rx_statistics;
priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
- iwl_setup_spectrum_handlers(priv);
iwl_setup_rx_scan_handlers(priv);
priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
@@ -1518,8 +1514,9 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
* iwl3945_print_event_log - Dump error event log to syslog
*
*/
-static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
- u32 num_events, u32 mode)
+static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
+ u32 num_events, u32 mode,
+ int pos, char **buf, size_t bufsz)
{
u32 i;
u32 base; /* SRAM byte address of event log header */
@@ -1529,7 +1526,7 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
unsigned long reg_flags;
if (num_events == 0)
- return;
+ return pos;
base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
@@ -1555,26 +1552,43 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
if (mode == 0) {
/* data, ev */
- IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
- trace_iwlwifi_dev_ucode_event(priv, 0, time, ev);
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "0x%08x:%04u\n",
+ time, ev);
+ } else {
+ IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
+ trace_iwlwifi_dev_ucode_event(priv, 0,
+ time, ev);
+ }
} else {
data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", time, data, ev);
- trace_iwlwifi_dev_ucode_event(priv, time, data, ev);
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "%010u:0x%08x:%04u\n",
+ time, data, ev);
+ } else {
+ IWL_ERR(priv, "%010u\t0x%08x\t%04u\n",
+ time, data, ev);
+ trace_iwlwifi_dev_ucode_event(priv, time,
+ data, ev);
+ }
}
}
/* Allow device to power down */
iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return pos;
}
/**
* iwl3945_print_last_event_logs - Dump the newest # of event log to syslog
*/
-static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
+static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
u32 num_wraps, u32 next_entry,
- u32 size, u32 mode)
+ u32 size, u32 mode,
+ int pos, char **buf, size_t bufsz)
{
/*
* display the newest DEFAULT_LOG_ENTRIES entries
@@ -1582,21 +1596,28 @@ static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
*/
if (num_wraps) {
if (next_entry < size) {
- iwl3945_print_event_log(priv,
- capacity - (size - next_entry),
- size - next_entry, mode);
- iwl3945_print_event_log(priv, 0,
- next_entry, mode);
+ pos = iwl3945_print_event_log(priv,
+ capacity - (size - next_entry),
+ size - next_entry, mode,
+ pos, buf, bufsz);
+ pos = iwl3945_print_event_log(priv, 0,
+ next_entry, mode,
+ pos, buf, bufsz);
} else
- iwl3945_print_event_log(priv, next_entry - size,
- size, mode);
+ pos = iwl3945_print_event_log(priv, next_entry - size,
+ size, mode,
+ pos, buf, bufsz);
} else {
if (next_entry < size)
- iwl3945_print_event_log(priv, 0, next_entry, mode);
+ pos = iwl3945_print_event_log(priv, 0,
+ next_entry, mode,
+ pos, buf, bufsz);
else
- iwl3945_print_event_log(priv, next_entry - size,
- size, mode);
+ pos = iwl3945_print_event_log(priv, next_entry - size,
+ size, mode,
+ pos, buf, bufsz);
}
+ return pos;
}
/* For sanity check only. Actual size is determined by uCode, typ. 512 */
@@ -1604,7 +1625,8 @@ static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
-void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
+int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+ char **buf, bool display)
{
u32 base; /* SRAM byte address of event log header */
u32 capacity; /* event log capacity in # entries */
@@ -1612,11 +1634,13 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
u32 num_wraps; /* # times uCode wrapped to top of log */
u32 next_entry; /* index of next entry to be written by uCode */
u32 size; /* # entries that we'll print */
+ int pos = 0;
+ size_t bufsz = 0;
base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
if (!iwl3945_hw_valid_rtc_data_addr(base)) {
IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
- return;
+ return -EINVAL;
}
/* event log header */
@@ -1642,7 +1666,7 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
/* bail out if nothing in log */
if (size == 0) {
IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
- return;
+ return pos;
}
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -1658,25 +1682,38 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
size);
#ifdef CONFIG_IWLWIFI_DEBUG
+ if (display) {
+ if (full_log)
+ bufsz = capacity * 48;
+ else
+ bufsz = size * 48;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ }
if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
/* if uCode has wrapped back to top of log,
* start at the oldest entry,
* i.e the next one that uCode would fill.
*/
if (num_wraps)
- iwl3945_print_event_log(priv, next_entry,
- capacity - next_entry, mode);
+ pos = iwl3945_print_event_log(priv, next_entry,
+ capacity - next_entry, mode,
+ pos, buf, bufsz);
/* (then/else) start at top of log */
- iwl3945_print_event_log(priv, 0, next_entry, mode);
+ pos = iwl3945_print_event_log(priv, 0, next_entry, mode,
+ pos, buf, bufsz);
} else
- iwl3945_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode);
+ pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
#else
- iwl3945_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode);
+ pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
#endif
-
+ return pos;
}
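With the new signatures, the dump helpers either log to syslog or, when a buffer was allocated (roughly 48 bytes per formatted entry, as sized above), append with scnprintf() and thread the running position through their return value. The convention in isolation (hypothetical helper):

#include <linux/kernel.h>

static int foo_append_event(char *buf, int pos, size_t bufsz,
			    u32 time, u32 data, u32 ev)
{
	if (bufsz)
		return pos + scnprintf(buf + pos, bufsz - pos,
				       "%010u:0x%08x:%04u\n", time, data, ev);

	/* no buffer supplied: fall back to the syslog path */
	pr_err("%010u\t0x%08x\t%04u\n", time, data, ev);
	return pos;
}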
static void iwl3945_irq_tasklet(struct iwl_priv *priv)
@@ -2996,18 +3033,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
mutex_unlock(&priv->mutex);
}
-static void iwl3945_bg_up(struct work_struct *data)
-{
- struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
- __iwl3945_up(priv);
- mutex_unlock(&priv->mutex);
-}
-
static void iwl3945_bg_restart(struct work_struct *data)
{
struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
@@ -3024,7 +3049,13 @@ static void iwl3945_bg_restart(struct work_struct *data)
ieee80211_restart_hw(priv->hw);
} else {
iwl3945_down(priv);
- queue_work(priv->workqueue, &priv->up);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ mutex_lock(&priv->mutex);
+ __iwl3945_up(priv);
+ mutex_unlock(&priv->mutex);
}
}
@@ -3528,8 +3559,6 @@ static ssize_t store_filter_flags(struct device *d,
static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
store_filter_flags);
-#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
-
static ssize_t show_measurement(struct device *d,
struct device_attribute *attr, char *buf)
{
@@ -3599,7 +3628,6 @@ static ssize_t store_measurement(struct device *d,
static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
show_measurement, store_measurement);
-#endif /* CONFIG_IWL3945_SPECTRUM_MEASUREMENT */
static ssize_t store_retry_rate(struct device *d,
struct device_attribute *attr,
@@ -3748,7 +3776,6 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
init_waitqueue_head(&priv->wait_command_queue);
- INIT_WORK(&priv->up, iwl3945_bg_up);
INIT_WORK(&priv->restart, iwl3945_bg_restart);
INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
@@ -3782,9 +3809,7 @@ static struct attribute *iwl3945_sysfs_entries[] = {
&dev_attr_dump_errors.attr,
&dev_attr_flags.attr,
&dev_attr_filter_flags.attr,
-#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
&dev_attr_measurement.attr,
-#endif
&dev_attr_retry_rate.attr,
&dev_attr_statistics.attr,
&dev_attr_status.attr,
@@ -3810,7 +3835,6 @@ static struct ieee80211_ops iwl3945_hw_ops = {
.config = iwl_mac_config,
.configure_filter = iwl_configure_filter,
.set_key = iwl3945_mac_set_key,
- .get_tx_stats = iwl_mac_get_tx_stats,
.conf_tx = iwl_mac_conf_tx,
.reset_tsf = iwl_mac_reset_tsf,
.bss_info_changed = iwl_bss_info_changed,
@@ -3840,6 +3864,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
priv->band = IEEE80211_BAND_2GHZ;
priv->iw_mode = NL80211_IFTYPE_STATION;
+ priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
iwl_reset_qos(priv);
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 842811142bef..79ffa3b98d73 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -268,7 +268,7 @@ struct iwm_priv {
struct sk_buff_head rx_list;
struct list_head rx_tickets;
- struct list_head rx_packets[IWM_RX_ID_HASH + 1];
+ struct list_head rx_packets[IWM_RX_ID_HASH];
struct workqueue_struct *rx_wq;
struct work_struct rx_worker;
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index f727b4a83196..ad8f7eabb5aa 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -868,36 +868,35 @@ static int iwm_mlme_mgt_frame(struct iwm_priv *iwm, u8 *buf,
struct iwm_umac_notif_mgt_frame *mgt_frame =
(struct iwm_umac_notif_mgt_frame *)buf;
struct ieee80211_mgmt *mgt = (struct ieee80211_mgmt *)mgt_frame->frame;
- u8 *ie;
IWM_HEXDUMP(iwm, DBG, MLME, "MGT: ", mgt_frame->frame,
le16_to_cpu(mgt_frame->len));
if (ieee80211_is_assoc_req(mgt->frame_control)) {
- ie = mgt->u.assoc_req.variable;;
- iwm->req_ie_len =
- le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt);
+ iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
+ - offsetof(struct ieee80211_mgmt,
+ u.assoc_req.variable);
kfree(iwm->req_ie);
iwm->req_ie = kmemdup(mgt->u.assoc_req.variable,
iwm->req_ie_len, GFP_KERNEL);
} else if (ieee80211_is_reassoc_req(mgt->frame_control)) {
- ie = mgt->u.reassoc_req.variable;;
- iwm->req_ie_len =
- le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt);
+ iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
+ - offsetof(struct ieee80211_mgmt,
+ u.reassoc_req.variable);
kfree(iwm->req_ie);
iwm->req_ie = kmemdup(mgt->u.reassoc_req.variable,
iwm->req_ie_len, GFP_KERNEL);
} else if (ieee80211_is_assoc_resp(mgt->frame_control)) {
- ie = mgt->u.assoc_resp.variable;;
- iwm->resp_ie_len =
- le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt);
+ iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
+ - offsetof(struct ieee80211_mgmt,
+ u.assoc_resp.variable);
kfree(iwm->resp_ie);
iwm->resp_ie = kmemdup(mgt->u.assoc_resp.variable,
iwm->resp_ie_len, GFP_KERNEL);
} else if (ieee80211_is_reassoc_resp(mgt->frame_control)) {
- ie = mgt->u.reassoc_resp.variable;;
- iwm->resp_ie_len =
- le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt);
+ iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
+ - offsetof(struct ieee80211_mgmt,
+ u.reassoc_resp.variable);
kfree(iwm->resp_ie);
iwm->resp_ie = kmemdup(mgt->u.reassoc_resp.variable,
iwm->resp_ie_len, GFP_KERNEL);
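The rewrite above drops the stray double semicolons and the temporary ie pointer: the variable-length IEs always start right after the fixed part of the corresponding (re)assoc request or response, so offsetof() yields the same byte offset that ie - (u8 *)mgt did. A stand-alone sketch of that equivalence, using a deliberately simplified layout rather than the real struct ieee80211_mgmt:

#include <stddef.h>
#include <stdio.h>

struct mgmt_sketch {
	unsigned char header[24];		/* frame control, addresses, ... */
	union {
		struct {
			unsigned short capab_info;
			unsigned short listen_interval;
			unsigned char variable[32];	/* IEs start here */
		} assoc_req;
	} u;
};

int main(void)
{
	struct mgmt_sketch m;
	unsigned char *ie = m.u.assoc_req.variable;

	/* Both expressions yield the offset of the first IE byte. */
	printf("%zu == %zu\n",
	       (size_t)(ie - (unsigned char *)&m),
	       offsetof(struct mgmt_sketch, u.assoc_req.variable));
	return 0;
}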
@@ -1534,6 +1533,33 @@ static void classify8023(struct sk_buff *skb)
}
}
+static void iwm_rx_process_amsdu(struct iwm_priv *iwm, struct sk_buff *skb)
+{
+ struct wireless_dev *wdev = iwm_to_wdev(iwm);
+ struct net_device *ndev = iwm_to_ndev(iwm);
+ struct sk_buff_head list;
+ struct sk_buff *frame;
+
+ IWM_HEXDUMP(iwm, DBG, RX, "A-MSDU: ", skb->data, skb->len);
+
+ __skb_queue_head_init(&list);
+ ieee80211_amsdu_to_8023s(skb, &list, ndev->dev_addr, wdev->iftype, 0);
+
+ while ((frame = __skb_dequeue(&list))) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += frame->len;
+
+ frame->protocol = eth_type_trans(frame, ndev);
+ frame->ip_summed = CHECKSUM_NONE;
+ memset(frame->cb, 0, sizeof(frame->cb));
+
+ if (netif_rx_ni(frame) == NET_RX_DROP) {
+ IWM_ERR(iwm, "Packet dropped\n");
+ ndev->stats.rx_dropped++;
+ }
+ }
+}
+
static void iwm_rx_process_packet(struct iwm_priv *iwm,
struct iwm_rx_packet *packet,
struct iwm_rx_ticket_node *ticket_node)
@@ -1548,25 +1574,34 @@ static void iwm_rx_process_packet(struct iwm_priv *iwm,
switch (le16_to_cpu(ticket_node->ticket->action)) {
case IWM_RX_TICKET_RELEASE:
IWM_DBG_RX(iwm, DBG, "RELEASE packet\n");
- classify8023(skb);
+
iwm_rx_adjust_packet(iwm, packet, ticket_node);
+ skb->dev = iwm_to_ndev(iwm);
+ classify8023(skb);
+
+ if (le16_to_cpu(ticket_node->ticket->flags) &
+ IWM_RX_TICKET_AMSDU_MSK) {
+ iwm_rx_process_amsdu(iwm, skb);
+ break;
+ }
+
ret = ieee80211_data_to_8023(skb, ndev->dev_addr, wdev->iftype);
if (ret < 0) {
IWM_DBG_RX(iwm, DBG, "Couldn't convert 802.11 header - "
"%d\n", ret);
+ kfree_skb(packet->skb);
break;
}
IWM_HEXDUMP(iwm, DBG, RX, "802.3: ", skb->data, skb->len);
- skb->dev = iwm_to_ndev(iwm);
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += skb->len;
+
skb->protocol = eth_type_trans(skb, ndev);
skb->ip_summed = CHECKSUM_NONE;
memset(skb->cb, 0, sizeof(skb->cb));
- ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += skb->len;
-
if (netif_rx_ni(skb) == NET_RX_DROP) {
IWM_ERR(iwm, "Packet dropped\n");
ndev->stats.rx_dropped++;
diff --git a/drivers/net/wireless/libertas/Kconfig b/drivers/net/wireless/libertas/Kconfig
index 30aa9d48d67e..0485c9957575 100644
--- a/drivers/net/wireless/libertas/Kconfig
+++ b/drivers/net/wireless/libertas/Kconfig
@@ -37,3 +37,9 @@ config LIBERTAS_DEBUG
depends on LIBERTAS
---help---
Debugging support.
+
+config LIBERTAS_MESH
+ bool "Enable mesh support"
+ depends on LIBERTAS
+ help
+ This enables Libertas' MESH support, used by e.g. the OLPC people.
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index b188cd97a053..45e870e33117 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -5,11 +5,11 @@ libertas-y += cmdresp.o
libertas-y += debugfs.o
libertas-y += ethtool.o
libertas-y += main.o
-libertas-y += mesh.o
libertas-y += rx.o
libertas-y += scan.o
libertas-y += tx.o
libertas-y += wext.o
+libertas-$(CONFIG_LIBERTAS_MESH) += mesh.o
usb8xxx-objs += if_usb.o
libertas_cs-objs += if_cs.o
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index 751067369ba8..f03d5e4e59c3 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -390,10 +390,8 @@ int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
- if (!ret && cmd_action == CMD_ACT_GET) {
- priv->ratebitmap = le16_to_cpu(cmd.bitmap);
+ if (!ret && cmd_action == CMD_ACT_GET)
priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
- }
lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
return ret;
@@ -807,8 +805,7 @@ static int lbs_try_associate(struct lbs_private *priv,
}
/* Use short preamble only when both the BSS and firmware support it */
- if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) &&
- (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
+ if (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
preamble = RADIO_PREAMBLE_SHORT;
ret = lbs_set_radio(priv, preamble, 1);
@@ -939,8 +936,7 @@ static int lbs_adhoc_join(struct lbs_private *priv,
}
/* Use short preamble only when both the BSS and firmware support it */
- if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) &&
- (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) {
+ if (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
lbs_deb_join("AdhocJoin: Short preamble\n");
preamble = RADIO_PREAMBLE_SHORT;
}
@@ -1049,7 +1045,7 @@ static int lbs_adhoc_start(struct lbs_private *priv,
struct assoc_request *assoc_req)
{
struct cmd_ds_802_11_ad_hoc_start cmd;
- u8 preamble = RADIO_PREAMBLE_LONG;
+ u8 preamble = RADIO_PREAMBLE_SHORT;
size_t ratesize = 0;
u16 tmpcap = 0;
int ret = 0;
@@ -1057,11 +1053,6 @@ static int lbs_adhoc_start(struct lbs_private *priv,
lbs_deb_enter(LBS_DEB_ASSOC);
- if (priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
- lbs_deb_join("ADHOC_START: Will use short preamble\n");
- preamble = RADIO_PREAMBLE_SHORT;
- }
-
ret = lbs_set_radio(priv, preamble, 1);
if (ret)
goto out;
@@ -1169,11 +1160,11 @@ int lbs_adhoc_stop(struct lbs_private *priv)
static inline int match_bss_no_security(struct lbs_802_11_security *secinfo,
struct bss_descriptor *match_bss)
{
- if (!secinfo->wep_enabled && !secinfo->WPAenabled
- && !secinfo->WPA2enabled
- && match_bss->wpa_ie[0] != WLAN_EID_GENERIC
- && match_bss->rsn_ie[0] != WLAN_EID_RSN
- && !(match_bss->capability & WLAN_CAPABILITY_PRIVACY))
+ if (!secinfo->wep_enabled &&
+ !secinfo->WPAenabled && !secinfo->WPA2enabled &&
+ match_bss->wpa_ie[0] != WLAN_EID_GENERIC &&
+ match_bss->rsn_ie[0] != WLAN_EID_RSN &&
+ !(match_bss->capability & WLAN_CAPABILITY_PRIVACY))
return 1;
else
return 0;
@@ -1182,9 +1173,9 @@ static inline int match_bss_no_security(struct lbs_802_11_security *secinfo,
static inline int match_bss_static_wep(struct lbs_802_11_security *secinfo,
struct bss_descriptor *match_bss)
{
- if (secinfo->wep_enabled && !secinfo->WPAenabled
- && !secinfo->WPA2enabled
- && (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
+ if (secinfo->wep_enabled &&
+ !secinfo->WPAenabled && !secinfo->WPA2enabled &&
+ (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
return 1;
else
return 0;
@@ -1193,8 +1184,8 @@ static inline int match_bss_static_wep(struct lbs_802_11_security *secinfo,
static inline int match_bss_wpa(struct lbs_802_11_security *secinfo,
struct bss_descriptor *match_bss)
{
- if (!secinfo->wep_enabled && secinfo->WPAenabled
- && (match_bss->wpa_ie[0] == WLAN_EID_GENERIC)
+ if (!secinfo->wep_enabled && secinfo->WPAenabled &&
+ (match_bss->wpa_ie[0] == WLAN_EID_GENERIC)
/* privacy bit may NOT be set in some APs like LinkSys WRT54G
&& (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */
)
@@ -1219,11 +1210,11 @@ static inline int match_bss_wpa2(struct lbs_802_11_security *secinfo,
static inline int match_bss_dynamic_wep(struct lbs_802_11_security *secinfo,
struct bss_descriptor *match_bss)
{
- if (!secinfo->wep_enabled && !secinfo->WPAenabled
- && !secinfo->WPA2enabled
- && (match_bss->wpa_ie[0] != WLAN_EID_GENERIC)
- && (match_bss->rsn_ie[0] != WLAN_EID_RSN)
- && (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
+ if (!secinfo->wep_enabled &&
+ !secinfo->WPAenabled && !secinfo->WPA2enabled &&
+ (match_bss->wpa_ie[0] != WLAN_EID_GENERIC) &&
+ (match_bss->rsn_ie[0] != WLAN_EID_RSN) &&
+ (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
return 1;
else
return 0;
@@ -1534,8 +1525,8 @@ static int assoc_helper_associate(struct lbs_private *priv,
/* If we're given an 'any' BSSID, try associating based on SSID */
if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
- if (compare_ether_addr(bssid_any, assoc_req->bssid)
- && compare_ether_addr(bssid_off, assoc_req->bssid)) {
+ if (compare_ether_addr(bssid_any, assoc_req->bssid) &&
+ compare_ether_addr(bssid_off, assoc_req->bssid)) {
ret = assoc_helper_bssid(priv, assoc_req);
done = 1;
}
@@ -1621,11 +1612,9 @@ static int assoc_helper_channel(struct lbs_private *priv,
goto restore_mesh;
}
- if ( assoc_req->secinfo.wep_enabled
- && (assoc_req->wep_keys[0].len
- || assoc_req->wep_keys[1].len
- || assoc_req->wep_keys[2].len
- || assoc_req->wep_keys[3].len)) {
+ if (assoc_req->secinfo.wep_enabled &&
+ (assoc_req->wep_keys[0].len || assoc_req->wep_keys[1].len ||
+ assoc_req->wep_keys[2].len || assoc_req->wep_keys[3].len)) {
/* Make sure WEP keys are re-sent to firmware */
set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags);
}
@@ -1992,14 +1981,14 @@ void lbs_association_worker(struct work_struct *work)
assoc_req->secinfo.auth_mode);
/* If 'any' SSID was specified, find an SSID to associate with */
- if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)
- && !assoc_req->ssid_len)
+ if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags) &&
+ !assoc_req->ssid_len)
find_any_ssid = 1;
/* But don't use 'any' SSID if there's a valid locked BSSID to use */
if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
- if (compare_ether_addr(assoc_req->bssid, bssid_any)
- && compare_ether_addr(assoc_req->bssid, bssid_off))
+ if (compare_ether_addr(assoc_req->bssid, bssid_any) &&
+ compare_ether_addr(assoc_req->bssid, bssid_off))
find_any_ssid = 0;
}
@@ -2061,13 +2050,6 @@ void lbs_association_worker(struct work_struct *work)
goto out;
}
- if ( test_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags)
- || test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags)) {
- ret = assoc_helper_wep_keys(priv, assoc_req);
- if (ret)
- goto out;
- }
-
if (test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) {
ret = assoc_helper_secinfo(priv, assoc_req);
if (ret)
@@ -2080,18 +2062,31 @@ void lbs_association_worker(struct work_struct *work)
goto out;
}
- if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)
- || test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) {
+ /*
+ * v10 FW wants WPA keys to be set/cleared before WEP key operations,
+ * otherwise it will fail to correctly associate to WEP networks.
+ * Other firmware versions don't appear to care.
+ */
+ if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags) ||
+ test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) {
ret = assoc_helper_wpa_keys(priv, assoc_req);
if (ret)
goto out;
}
+ if (test_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags) ||
+ test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags)) {
+ ret = assoc_helper_wep_keys(priv, assoc_req);
+ if (ret)
+ goto out;
+ }
+
+
/* SSID/BSSID should be the _last_ config option set, because they
* trigger the association attempt.
*/
- if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)
- || test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
+ if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags) ||
+ test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
int success = 1;
ret = assoc_helper_associate(priv, assoc_req);
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 42611bea76a3..82371ef39524 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -143,19 +143,6 @@ int lbs_update_hw_spec(struct lbs_private *priv)
lbs_deb_cmd("GET_HW_SPEC: hardware interface 0x%x, hardware spec 0x%04x\n",
cmd.hwifversion, cmd.version);
- /* Determine mesh_fw_ver from fwrelease and fwcapinfo */
- /* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */
- /* 5.110.22 have mesh command with 0xa3 command id */
- /* 10.0.0.p0 FW brings in mesh config command with different id */
- /* Check FW version MSB and initialize mesh_fw_ver */
- if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5)
- priv->mesh_fw_ver = MESH_FW_OLD;
- else if ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
- (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK))
- priv->mesh_fw_ver = MESH_FW_NEW;
- else
- priv->mesh_fw_ver = MESH_NONE;
-
/* Clamp region code to 8-bit since FW spec indicates that it should
* only ever be 8-bit, even though the field size is 16-bit. Some firmware
* returns non-zero high 8 bits here.
@@ -855,9 +842,6 @@ int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on)
if (priv->fwrelease < 0x09000000) {
switch (preamble) {
case RADIO_PREAMBLE_SHORT:
- if (!(priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
- goto out;
- /* Fall through */
case RADIO_PREAMBLE_AUTO:
case RADIO_PREAMBLE_LONG:
cmd.control = cpu_to_le16(preamble);
@@ -1011,6 +995,8 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
ret = 0;
break;
+#ifdef CONFIG_LIBERTAS_MESH
+
case CMD_BT_ACCESS:
ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf);
break;
@@ -1019,6 +1005,8 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf);
break;
+#endif
+
case CMD_802_11_BEACON_CTRL:
ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action);
break;
@@ -1317,7 +1305,7 @@ int lbs_execute_next_command(struct lbs_private *priv)
if ((priv->psmode != LBS802_11POWERMODECAM) &&
(priv->psstate == PS_STATE_FULL_POWER) &&
((priv->connect_status == LBS_CONNECTED) ||
- (priv->mesh_connect_status == LBS_CONNECTED))) {
+ lbs_mesh_connected(priv))) {
if (priv->secinfo.WPAenabled ||
priv->secinfo.WPA2enabled) {
/* check for valid WPA group keys */
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index 2862748aef70..cb4138a55fdf 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -110,18 +110,6 @@ int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val);
int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val);
-/* Mesh related */
-
-int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
- struct cmd_ds_mesh_access *cmd);
-
-int lbs_mesh_config_send(struct lbs_private *priv,
- struct cmd_ds_mesh_config *cmd,
- uint16_t action, uint16_t type);
-
-int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
-
-
/* Commands only used in wext.c, assoc. and scan.c */
int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 21d57690c20a..e7470442f76b 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -240,11 +240,6 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
/* Now we got response from FW, cancel the command timer */
del_timer(&priv->command_timer);
priv->cmd_timed_out = 0;
- if (priv->nr_retries) {
- lbs_pr_info("Received result %x to command %x after %d retries\n",
- result, curcmd, priv->nr_retries);
- priv->nr_retries = 0;
- }
/* Store the response code to cur_cmd_retcode. */
priv->cur_cmd_retcode = result;
@@ -485,20 +480,8 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
break;
case MACREG_INT_CODE_MESH_AUTO_STARTED:
- /* Ignore spurious autostart events if autostart is disabled */
- if (!priv->mesh_autostart_enabled) {
- lbs_pr_info("EVENT: MESH_AUTO_STARTED (ignoring)\n");
- break;
- }
- lbs_pr_info("EVENT: MESH_AUTO_STARTED\n");
- priv->mesh_connect_status = LBS_CONNECTED;
- if (priv->mesh_open) {
- netif_carrier_on(priv->mesh_dev);
- if (!priv->tx_pending_len)
- netif_wake_queue(priv->mesh_dev);
- }
- priv->mode = IW_MODE_ADHOC;
- schedule_work(&priv->sync_channel);
+ /* Ignore spurious autostart events */
+ lbs_pr_info("EVENT: MESH_AUTO_STARTED (ignoring)\n");
break;
default:
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index 6b6ea9f7bf5b..ea3f10ef4e00 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -397,13 +397,6 @@ enum KEY_INFO_WPA {
KEY_INFO_WPA_ENABLED = 0x04
};
-/** mesh_fw_ver */
-enum _mesh_fw_ver {
- MESH_NONE = 0, /* MESH is not supported */
- MESH_FW_OLD, /* MESH is supported in FW V5 */
- MESH_FW_NEW, /* MESH is supported in FW V10 and newer */
-};
-
/* Default values for fwt commands. */
#define FWT_DEFAULT_METRIC 0
#define FWT_DEFAULT_DIR 1
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 05bb298dfae9..6977ee820214 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -39,15 +39,14 @@ struct lbs_private {
/* Mesh */
struct net_device *mesh_dev; /* Virtual device */
+#ifdef CONFIG_LIBERTAS_MESH
u32 mesh_connect_status;
struct lbs_mesh_stats mstats;
int mesh_open;
- int mesh_fw_ver;
- int mesh_autostart_enabled;
uint16_t mesh_tlv;
u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1];
u8 mesh_ssid_len;
- struct work_struct sync_channel;
+#endif
/* Monitor mode */
struct net_device *rtap_net_dev;
@@ -110,7 +109,6 @@ struct lbs_private {
struct list_head cmdpendingq; /* pending command buffers */
wait_queue_head_t cmd_pending;
struct timer_list command_timer;
- int nr_retries;
int cmd_timed_out;
/* Command responses sent from the hardware to the driver */
@@ -176,9 +174,7 @@ struct lbs_private {
struct bss_descriptor *networks;
struct assoc_request * pending_assoc_req;
struct assoc_request * in_progress_assoc_req;
- u16 capability;
uint16_t enablehwauto;
- uint16_t ratebitmap;
/* ADHOC */
u16 beacon_period;
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 63d020374c2b..3804a58d7f4e 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -114,9 +114,11 @@ const struct ethtool_ops lbs_ethtool_ops = {
.get_drvinfo = lbs_ethtool_get_drvinfo,
.get_eeprom = lbs_ethtool_get_eeprom,
.get_eeprom_len = lbs_ethtool_get_eeprom_len,
+#ifdef CONFIG_LIBERTAS_MESH
.get_sset_count = lbs_mesh_ethtool_get_sset_count,
.get_ethtool_stats = lbs_mesh_ethtool_get_stats,
.get_strings = lbs_mesh_ethtool_get_strings,
+#endif
.get_wol = lbs_ethtool_get_wol,
.set_wol = lbs_ethtool_set_wol,
};
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index bf4bfbae6227..3ea03f259ee7 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -23,6 +23,7 @@
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/netdevice.h>
+#include <linux/semaphore.h>
#include <linux/spi/libertas_spi.h>
#include <linux/spi/spi.h>
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index c2975c8e2f21..cd8ed7fdafad 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -123,7 +123,7 @@ static ssize_t lbs_rtap_set(struct device *dev,
if (priv->monitormode == monitor_mode)
return strlen(buf);
if (!priv->monitormode) {
- if (priv->infra_open || priv->mesh_open)
+ if (priv->infra_open || lbs_mesh_open(priv))
return -EBUSY;
if (priv->mode == IW_MODE_INFRA)
lbs_cmd_80211_deauthenticate(priv,
@@ -536,31 +536,14 @@ static int lbs_thread(void *data)
if (priv->cmd_timed_out && priv->cur_cmd) {
struct cmd_ctrl_node *cmdnode = priv->cur_cmd;
- if (++priv->nr_retries > 3) {
- lbs_pr_info("Excessive timeouts submitting "
- "command 0x%04x\n",
- le16_to_cpu(cmdnode->cmdbuf->command));
- lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
- priv->nr_retries = 0;
- if (priv->reset_card)
- priv->reset_card(priv);
- } else {
- priv->cur_cmd = NULL;
- priv->dnld_sent = DNLD_RES_RECEIVED;
- lbs_pr_info("requeueing command 0x%04x due "
- "to timeout (#%d)\n",
- le16_to_cpu(cmdnode->cmdbuf->command),
- priv->nr_retries);
-
- /* Stick it back at the _top_ of the pending queue
- for immediate resubmission */
- list_add(&cmdnode->list, &priv->cmdpendingq);
- }
+ lbs_pr_info("Timeout submitting command 0x%04x\n",
+ le16_to_cpu(cmdnode->cmdbuf->command));
+ lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
+ if (priv->reset_card)
+ priv->reset_card(priv);
}
priv->cmd_timed_out = 0;
-
-
if (!priv->fw_ready)
continue;
@@ -622,7 +605,7 @@ static int lbs_thread(void *data)
if (priv->connect_status == LBS_CONNECTED)
netif_wake_queue(priv->dev);
if (priv->mesh_dev &&
- priv->mesh_connect_status == LBS_CONNECTED)
+ lbs_mesh_connected(priv))
netif_wake_queue(priv->mesh_dev);
}
}
@@ -732,7 +715,7 @@ done:
* This function handles the timeout of command sending.
* It will re-send the same command again.
*/
-static void command_timer_fn(unsigned long data)
+static void lbs_cmd_timeout_handler(unsigned long data)
{
struct lbs_private *priv = (struct lbs_private *)data;
unsigned long flags;
@@ -809,18 +792,6 @@ int lbs_exit_auto_deep_sleep(struct lbs_private *priv)
return 0;
}
-static void lbs_sync_channel_worker(struct work_struct *work)
-{
- struct lbs_private *priv = container_of(work, struct lbs_private,
- sync_channel);
-
- lbs_deb_enter(LBS_DEB_MAIN);
- if (lbs_update_channel(priv))
- lbs_pr_info("Channel synchronization failed.");
- lbs_deb_leave(LBS_DEB_MAIN);
-}
-
-
static int lbs_init_adapter(struct lbs_private *priv)
{
size_t bufsize;
@@ -848,14 +819,12 @@ static int lbs_init_adapter(struct lbs_private *priv)
memset(priv->current_addr, 0xff, ETH_ALEN);
priv->connect_status = LBS_DISCONNECTED;
- priv->mesh_connect_status = LBS_DISCONNECTED;
priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
priv->mode = IW_MODE_INFRA;
priv->channel = DEFAULT_AD_HOC_CHANNEL;
priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
priv->radio_on = 1;
priv->enablehwauto = 1;
- priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
priv->psmode = LBS802_11POWERMODECAM;
priv->psstate = PS_STATE_FULL_POWER;
priv->is_deep_sleep = 0;
@@ -865,7 +834,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
mutex_init(&priv->lock);
- setup_timer(&priv->command_timer, command_timer_fn,
+ setup_timer(&priv->command_timer, lbs_cmd_timeout_handler,
(unsigned long)priv);
setup_timer(&priv->auto_deepsleep_timer, auto_deepsleep_timer_fn,
(unsigned long)priv);
@@ -998,11 +967,6 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
INIT_DELAYED_WORK(&priv->assoc_work, lbs_association_worker);
INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker);
INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker);
- INIT_WORK(&priv->sync_channel, lbs_sync_channel_worker);
-
- priv->mesh_open = 0;
- sprintf(priv->mesh_ssid, "mesh");
- priv->mesh_ssid_len = 4;
priv->wol_criteria = 0xffffffff;
priv->wol_gpio = 0xff;
@@ -1076,6 +1040,17 @@ void lbs_remove_card(struct lbs_private *priv)
EXPORT_SYMBOL_GPL(lbs_remove_card);
+static int lbs_rtap_supported(struct lbs_private *priv)
+{
+ if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5)
+ return 1;
+
+ /* newer firmware use a capability mask */
+ return ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
+ (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK));
+}
+
+
int lbs_start_card(struct lbs_private *priv)
{
struct net_device *dev = priv->dev;
@@ -1095,12 +1070,14 @@ int lbs_start_card(struct lbs_private *priv)
lbs_update_channel(priv);
+ lbs_init_mesh(priv);
+
/*
* While rtap isn't related to mesh, only mesh-enabled
* firmware implements the rtap functionality via
* CMD_802_11_MONITOR_MODE.
*/
- if (lbs_init_mesh(priv)) {
+ if (lbs_rtap_supported(priv)) {
if (device_create_file(&dev->dev, &dev_attr_lbs_rtap))
lbs_pr_err("cannot register lbs_rtap attribute\n");
}
@@ -1134,7 +1111,9 @@ void lbs_stop_card(struct lbs_private *priv)
netif_carrier_off(dev);
lbs_debugfs_remove_one(priv);
- if (lbs_deinit_mesh(priv))
+ lbs_deinit_mesh(priv);
+
+ if (lbs_rtap_supported(priv))
device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
/* Delete the timeout of the currently processing command */
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 92b7a357a5e4..e385af1f4583 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -1,4 +1,3 @@
-#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
@@ -197,7 +196,14 @@ int lbs_init_mesh(struct lbs_private *priv)
lbs_deb_enter(LBS_DEB_MESH);
- if (priv->mesh_fw_ver == MESH_FW_OLD) {
+ priv->mesh_connect_status = LBS_DISCONNECTED;
+
+ /* Determine mesh_fw_ver from fwrelease and fwcapinfo */
+ /* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */
+ /* 5.110.22 have mesh command with 0xa3 command id */
+ /* 10.0.0.p0 FW brings in mesh config command with different id */
+ /* Check FW version MSB and initialize mesh_fw_ver */
+ if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5) {
/* Enable mesh, if supported, and work out which TLV it uses.
0x100 + 291 is an unofficial value used in 5.110.20.pXX
0x100 + 37 is the official value used in 5.110.21.pXX
@@ -219,7 +225,9 @@ int lbs_init_mesh(struct lbs_private *priv)
priv->channel))
priv->mesh_tlv = 0;
}
- } else if (priv->mesh_fw_ver == MESH_FW_NEW) {
+ } else
+ if ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
+ (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK)) {
/* 10.0.0.pXX new firmwares should succeed with TLV
* 0x100+37; Do not invoke command with old TLV.
*/
@@ -228,7 +236,12 @@ int lbs_init_mesh(struct lbs_private *priv)
priv->channel))
priv->mesh_tlv = 0;
}
+
+
if (priv->mesh_tlv) {
+ sprintf(priv->mesh_ssid, "mesh");
+ priv->mesh_ssid_len = 4;
+
lbs_add_mesh(priv);
if (device_create_file(&dev->dev, &dev_attr_lbs_mesh))
@@ -416,10 +429,10 @@ struct net_device *lbs_mesh_set_dev(struct lbs_private *priv,
struct net_device *dev, struct rxpd *rxpd)
{
if (priv->mesh_dev) {
- if (priv->mesh_fw_ver == MESH_FW_OLD) {
+ if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID) {
if (rxpd->rx_control & RxPD_MESH_FRAME)
dev = priv->mesh_dev;
- } else if (priv->mesh_fw_ver == MESH_FW_NEW) {
+ } else if (priv->mesh_tlv == TLV_TYPE_MESH_ID) {
if (rxpd->u.bss.bss_num == MESH_IFACE_ID)
dev = priv->mesh_dev;
}
@@ -432,9 +445,9 @@ void lbs_mesh_set_txpd(struct lbs_private *priv,
struct net_device *dev, struct txpd *txpd)
{
if (dev == priv->mesh_dev) {
- if (priv->mesh_fw_ver == MESH_FW_OLD)
+ if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID)
txpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME);
- else if (priv->mesh_fw_ver == MESH_FW_NEW)
+ else if (priv->mesh_tlv == TLV_TYPE_MESH_ID)
txpd->u.bss.bss_num = MESH_IFACE_ID;
}
}
@@ -538,7 +551,7 @@ static int __lbs_mesh_config_send(struct lbs_private *priv,
* Command id is 0xac for v10 FW along with mesh interface
* id in bits 14-13-12.
*/
- if (priv->mesh_fw_ver == MESH_FW_NEW)
+ if (priv->mesh_tlv == TLV_TYPE_MESH_ID)
command = CMD_MESH_CONFIG |
(MESH_IFACE_ID << MESH_IFACE_BIT_OFFSET);
diff --git a/drivers/net/wireless/libertas/mesh.h b/drivers/net/wireless/libertas/mesh.h
index fea9b5d005fc..e2573303a328 100644
--- a/drivers/net/wireless/libertas/mesh.h
+++ b/drivers/net/wireless/libertas/mesh.h
@@ -9,6 +9,8 @@
#include <net/lib80211.h>
+#ifdef CONFIG_LIBERTAS_MESH
+
/* Mesh statistics */
struct lbs_mesh_stats {
u32 fwd_bcast_cnt; /* Fwd: Broadcast counter */
@@ -46,11 +48,20 @@ void lbs_mesh_set_txpd(struct lbs_private *priv,
/* Command handling */
struct cmd_ds_command;
+struct cmd_ds_mesh_access;
+struct cmd_ds_mesh_config;
int lbs_cmd_bt_access(struct cmd_ds_command *cmd,
u16 cmd_action, void *pdata_buf);
int lbs_cmd_fwt_access(struct cmd_ds_command *cmd,
u16 cmd_action, void *pdata_buf);
+int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
+ struct cmd_ds_mesh_access *cmd);
+int lbs_mesh_config_send(struct lbs_private *priv,
+ struct cmd_ds_mesh_config *cmd,
+ uint16_t action, uint16_t type);
+int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
+
/* Persistent configuration */
@@ -75,4 +86,25 @@ void lbs_mesh_ethtool_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *s);
+/* Accessors */
+
+#define lbs_mesh_open(priv) (priv->mesh_open)
+#define lbs_mesh_connected(priv) (priv->mesh_connect_status == LBS_CONNECTED)
+
+#else
+
+#define lbs_init_mesh(priv)
+#define lbs_deinit_mesh(priv)
+#define lbs_add_mesh(priv)
+#define lbs_remove_mesh(priv)
+#define lbs_mesh_set_dev(priv, dev, rxpd) (dev)
+#define lbs_mesh_set_txpd(priv, dev, txpd)
+#define lbs_mesh_config(priv, enable, chan)
+#define lbs_mesh_open(priv) (0)
+#define lbs_mesh_connected(priv) (0)
+
+#endif
+
+
+
#endif
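When CONFIG_LIBERTAS_MESH is off, the new #else branch turns every mesh entry point into a no-op macro and the two accessors into constant 0, so the rest of the driver keeps calling them without per-call #ifdefs. The only constraint is that the statement-like stubs must appear in statement position, which is why lbs_start_card() above now calls lbs_init_mesh(priv) unconditionally instead of testing its return value. A short sketch of the resulting caller pattern, simplified from the scan/tx paths touched later in this patch:

	/* Compiles identically with or without CONFIG_LIBERTAS_MESH:
	 * lbs_mesh_connected() is either a real status test or (0), in
	 * which case the compiler drops the mesh wakeup entirely. */
	if (priv->mesh_dev && lbs_mesh_connected(priv) &&
	    !priv->tx_pending_len)
		netif_wake_queue(priv->mesh_dev);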
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index b0b1c7841500..220361e69cd3 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -635,7 +635,7 @@ out:
if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len)
netif_wake_queue(priv->dev);
- if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED) &&
+ if (priv->mesh_dev && lbs_mesh_connected(priv) &&
!priv->tx_pending_len)
netif_wake_queue(priv->mesh_dev);
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 315d1ce286ca..52d244ea3d97 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -198,7 +198,7 @@ void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count)
if (priv->connect_status == LBS_CONNECTED)
netif_wake_queue(priv->dev);
- if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED))
+ if (priv->mesh_dev && lbs_mesh_connected(priv))
netif_wake_queue(priv->mesh_dev);
}
EXPORT_SYMBOL_GPL(lbs_send_tx_feedback);
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index 4b1aab593a84..71f88a08e090 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -192,7 +192,7 @@ static void copy_active_data_rates(struct lbs_private *priv, u8 *rates)
lbs_deb_enter(LBS_DEB_WEXT);
if ((priv->connect_status != LBS_CONNECTED) &&
- (priv->mesh_connect_status != LBS_CONNECTED))
+ !lbs_mesh_connected(priv))
memcpy(rates, lbs_bg_rates, MAX_RATES);
else
memcpy(rates, priv->curbssparams.rates, MAX_RATES);
@@ -298,6 +298,7 @@ static int lbs_get_nick(struct net_device *dev, struct iw_request_info *info,
return 0;
}
+#ifdef CONFIG_LIBERTAS_MESH
static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
@@ -307,7 +308,7 @@ static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
/* Use nickname to indicate that mesh is on */
- if (priv->mesh_connect_status == LBS_CONNECTED) {
+ if (lbs_mesh_connected(priv)) {
strncpy(extra, "Mesh", 12);
extra[12] = '\0';
dwrq->length = strlen(extra);
@@ -321,6 +322,7 @@ static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
lbs_deb_leave(LBS_DEB_WEXT);
return 0;
}
+#endif
static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
@@ -422,6 +424,7 @@ static int lbs_get_mode(struct net_device *dev,
return 0;
}
+#ifdef CONFIG_LIBERTAS_MESH
static int mesh_wlan_get_mode(struct net_device *dev,
struct iw_request_info *info, u32 * uwrq,
char *extra)
@@ -433,6 +436,7 @@ static int mesh_wlan_get_mode(struct net_device *dev,
lbs_deb_leave(LBS_DEB_WEXT);
return 0;
}
+#endif
static int lbs_get_txpow(struct net_device *dev,
struct iw_request_info *info,
@@ -863,7 +867,7 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
/* If we're not associated, all quality values are meaningless */
if ((priv->connect_status != LBS_CONNECTED) &&
- (priv->mesh_connect_status != LBS_CONNECTED))
+ !lbs_mesh_connected(priv))
goto out;
/* Quality by RSSI */
@@ -1010,6 +1014,7 @@ out:
return ret;
}
+#ifdef CONFIG_LIBERTAS_MESH
static int lbs_mesh_set_freq(struct net_device *dev,
struct iw_request_info *info,
struct iw_freq *fwrq, char *extra)
@@ -1061,6 +1066,7 @@ out:
lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
return ret;
}
+#endif
static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
@@ -2108,6 +2114,7 @@ out:
return ret;
}
+#ifdef CONFIG_LIBERTAS_MESH
static int lbs_mesh_get_essid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
@@ -2161,6 +2168,7 @@ static int lbs_mesh_set_essid(struct net_device *dev,
lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
return ret;
}
+#endif
/**
* @brief Connect to the AP or Ad-hoc Network with specific bssid
@@ -2267,7 +2275,13 @@ static const iw_handler lbs_handler[] = {
(iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
(iw_handler) NULL, /* SIOCSIWPMKSA */
};
+struct iw_handler_def lbs_handler_def = {
+ .num_standard = ARRAY_SIZE(lbs_handler),
+ .standard = (iw_handler *) lbs_handler,
+ .get_wireless_stats = lbs_get_wireless_stats,
+};
+#ifdef CONFIG_LIBERTAS_MESH
static const iw_handler mesh_wlan_handler[] = {
(iw_handler) NULL, /* SIOCSIWCOMMIT */
(iw_handler) lbs_get_name, /* SIOCGIWNAME */
@@ -2325,14 +2339,10 @@ static const iw_handler mesh_wlan_handler[] = {
(iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
(iw_handler) NULL, /* SIOCSIWPMKSA */
};
-struct iw_handler_def lbs_handler_def = {
- .num_standard = ARRAY_SIZE(lbs_handler),
- .standard = (iw_handler *) lbs_handler,
- .get_wireless_stats = lbs_get_wireless_stats,
-};
struct iw_handler_def mesh_handler_def = {
.num_standard = ARRAY_SIZE(mesh_wlan_handler),
.standard = (iw_handler *) mesh_wlan_handler,
.get_wireless_stats = lbs_get_wireless_stats,
};
+#endif
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 26a1abd5bb03..6ab30033c26c 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -318,14 +318,14 @@ static void lbtf_op_stop(struct ieee80211_hw *hw)
}
static int lbtf_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct lbtf_private *priv = hw->priv;
if (priv->vif != NULL)
return -EOPNOTSUPP;
- priv->vif = conf->vif;
- switch (conf->type) {
+ priv->vif = vif;
+ switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
lbtf_set_mode(priv, LBTF_AP_MODE);
@@ -337,12 +337,12 @@ static int lbtf_op_add_interface(struct ieee80211_hw *hw,
priv->vif = NULL;
return -EOPNOTSUPP;
}
- lbtf_set_mac_address(priv, (u8 *) conf->mac_addr);
+ lbtf_set_mac_address(priv, (u8 *) vif->addr);
return 0;
}
static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct lbtf_private *priv = hw->priv;
@@ -555,6 +555,9 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
priv->band.n_channels = ARRAY_SIZE(lbtf_channels);
priv->band.channels = priv->channels;
hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
skb_queue_head_init(&priv->bc_ps_buf);
SET_IEEE80211_DEV(hw, dmdev);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 88e41176e7fd..00ffe6dd435e 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -32,6 +32,10 @@ static int radios = 2;
module_param(radios, int, 0444);
MODULE_PARM_DESC(radios, "Number of simulated radios");
+static bool fake_hw_scan;
+module_param(fake_hw_scan, bool, 0444);
+MODULE_PARM_DESC(fake_hw_scan, "Install fake (no-op) hw-scan handler");
+
/**
* enum hwsim_regtest - the type of regulatory tests we offer
*
@@ -281,6 +285,8 @@ struct mac80211_hwsim_data {
struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
+ struct mac_address addresses[2];
+
struct ieee80211_channel *channel;
unsigned long beacon_int; /* in jiffies unit */
unsigned int rx_filter;
@@ -436,6 +442,38 @@ static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data,
}
+struct mac80211_hwsim_addr_match_data {
+ bool ret;
+ const u8 *addr;
+};
+
+static void mac80211_hwsim_addr_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mac80211_hwsim_addr_match_data *md = data;
+ if (memcmp(mac, md->addr, ETH_ALEN) == 0)
+ md->ret = true;
+}
+
+
+static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
+ const u8 *addr)
+{
+ struct mac80211_hwsim_addr_match_data md;
+
+ if (memcmp(addr, data->hw->wiphy->perm_addr, ETH_ALEN) == 0)
+ return true;
+
+ md.ret = false;
+ md.addr = addr;
+ ieee80211_iterate_active_interfaces_atomic(data->hw,
+ mac80211_hwsim_addr_iter,
+ &md);
+
+ return md.ret;
+}
+
+
static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
@@ -488,8 +526,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
if (nskb == NULL)
continue;
- if (memcmp(hdr->addr1, data2->hw->wiphy->perm_addr,
- ETH_ALEN) == 0)
+ if (mac80211_hwsim_addr_match(data2, hdr->addr1))
ack = true;
memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
ieee80211_rx_irqsafe(data2->hw, nskb);
@@ -553,24 +590,24 @@ static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n",
- wiphy_name(hw->wiphy), __func__, conf->type,
- conf->mac_addr);
- hwsim_set_magic(conf->vif);
+ wiphy_name(hw->wiphy), __func__, vif->type,
+ vif->addr);
+ hwsim_set_magic(vif);
return 0;
}
static void mac80211_hwsim_remove_interface(
- struct ieee80211_hw *hw, struct ieee80211_if_init_conf *conf)
+ struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n",
- wiphy_name(hw->wiphy), __func__, conf->type,
- conf->mac_addr);
- hwsim_check_magic(conf->vif);
- hwsim_clear_magic(conf->vif);
+ wiphy_name(hw->wiphy), __func__, vif->type,
+ vif->addr);
+ hwsim_check_magic(vif);
+ hwsim_clear_magic(vif);
}
@@ -618,12 +655,26 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
{
struct mac80211_hwsim_data *data = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
-
- printk(KERN_DEBUG "%s:%s (freq=%d idle=%d ps=%d)\n",
+ static const char *chantypes[4] = {
+ [NL80211_CHAN_NO_HT] = "noht",
+ [NL80211_CHAN_HT20] = "ht20",
+ [NL80211_CHAN_HT40MINUS] = "ht40-",
+ [NL80211_CHAN_HT40PLUS] = "ht40+",
+ };
+ static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
+ [IEEE80211_SMPS_AUTOMATIC] = "auto",
+ [IEEE80211_SMPS_OFF] = "off",
+ [IEEE80211_SMPS_STATIC] = "static",
+ [IEEE80211_SMPS_DYNAMIC] = "dynamic",
+ };
+
+ printk(KERN_DEBUG "%s:%s (freq=%d/%s idle=%d ps=%d smps=%s)\n",
wiphy_name(hw->wiphy), __func__,
conf->channel->center_freq,
+ chantypes[conf->channel_type],
!!(conf->flags & IEEE80211_CONF_IDLE),
- !!(conf->flags & IEEE80211_CONF_PS));
+ !!(conf->flags & IEEE80211_CONF_PS),
+ smps_modes[conf->smps_mode]);
data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
@@ -827,7 +878,77 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
}
#endif
-static const struct ieee80211_ops mac80211_hwsim_ops =
+static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+ switch (action) {
+ case IEEE80211_AMPDU_TX_START:
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ break;
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void mac80211_hwsim_flush(struct ieee80211_hw *hw, bool drop)
+{
+ /*
+ * In this special case, there's nothing we need to
+ * do because hwsim does transmission synchronously.
+ * In the future, when it does transmissions via
+ * userspace, we may need to do something.
+ */
+}
+
+struct hw_scan_done {
+ struct delayed_work w;
+ struct ieee80211_hw *hw;
+};
+
+static void hw_scan_done(struct work_struct *work)
+{
+ struct hw_scan_done *hsd =
+ container_of(work, struct hw_scan_done, w.work);
+
+ ieee80211_scan_completed(hsd->hw, false);
+ kfree(hsd);
+}
+
+static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
+ struct cfg80211_scan_request *req)
+{
+ struct hw_scan_done *hsd = kzalloc(sizeof(*hsd), GFP_KERNEL);
+ int i;
+
+ if (!hsd)
+ return -ENOMEM;
+
+ hsd->hw = hw;
+ INIT_DELAYED_WORK(&hsd->w, hw_scan_done);
+
+ printk(KERN_DEBUG "hwsim scan request\n");
+ for (i = 0; i < req->n_channels; i++)
+ printk(KERN_DEBUG "hwsim scan freq %d\n",
+ req->channels[i]->center_freq);
+
+ ieee80211_queue_delayed_work(hw, &hsd->w, 2 * HZ);
+
+ return 0;
+}
+
+static struct ieee80211_ops mac80211_hwsim_ops =
{
.tx = mac80211_hwsim_tx,
.start = mac80211_hwsim_start,
@@ -841,6 +962,8 @@ static const struct ieee80211_ops mac80211_hwsim_ops =
.set_tim = mac80211_hwsim_set_tim,
.conf_tx = mac80211_hwsim_conf_tx,
CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd)
+ .ampdu_action = mac80211_hwsim_ampdu_action,
+ .flush = mac80211_hwsim_flush,
};
@@ -1035,6 +1158,9 @@ static int __init init_mac80211_hwsim(void)
if (radios < 1 || radios > 100)
return -EINVAL;
+ if (fake_hw_scan)
+ mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
+
spin_lock_init(&hwsim_radio_lock);
INIT_LIST_HEAD(&hwsim_radios);
@@ -1072,7 +1198,11 @@ static int __init init_mac80211_hwsim(void)
SET_IEEE80211_DEV(hw, data->dev);
addr[3] = i >> 8;
addr[4] = i;
- SET_IEEE80211_PERM_ADDR(hw, addr);
+ memcpy(data->addresses[0].addr, addr, ETH_ALEN);
+ memcpy(data->addresses[1].addr, addr, ETH_ALEN);
+ data->addresses[1].addr[0] |= 0x40;
+ hw->wiphy->n_addresses = 2;
+ hw->wiphy->addresses = data->addresses;
hw->channel_change_time = 1;
hw->queues = 4;
@@ -1082,7 +1212,9 @@ static int __init init_mac80211_hwsim(void)
BIT(NL80211_IFTYPE_MESH_POINT);
hw->flags = IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_SIGNAL_DBM;
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
/* ask mac80211 to reserve space for magic */
hw->vif_data_size = sizeof(struct hwsim_vif_priv);
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 59f92105b0c2..0cfdb9db66f7 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -2,7 +2,7 @@
* drivers/net/wireless/mwl8k.c
* Driver for Marvell TOPDOG 802.11 Wireless cards
*
- * Copyright (C) 2008-2009 Marvell Semiconductor Inc.
+ * Copyright (C) 2008, 2009, 2010 Marvell Semiconductor Inc.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -26,7 +26,7 @@
#define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver"
#define MWL8K_NAME KBUILD_MODNAME
-#define MWL8K_VERSION "0.10"
+#define MWL8K_VERSION "0.12"
/* Register definitions */
#define MWL8K_HIU_GEN_PTR 0x00000c10
@@ -92,8 +92,7 @@ struct mwl8k_device_info {
char *part_name;
char *helper_image;
char *fw_image;
- struct rxd_ops *rxd_ops;
- u16 modes;
+ struct rxd_ops *ap_rxd_ops;
};
struct mwl8k_rx_queue {
@@ -120,34 +119,36 @@ struct mwl8k_tx_queue {
/* sw appends here */
int tail;
- struct ieee80211_tx_queue_stats stats;
+ unsigned int len;
struct mwl8k_tx_desc *txd;
dma_addr_t txd_dma;
struct sk_buff **skb;
};
-/* Pointers to the firmware data and meta information about it. */
-struct mwl8k_firmware {
- /* Boot helper code */
- struct firmware *helper;
+struct mwl8k_priv {
+ struct ieee80211_hw *hw;
+ struct pci_dev *pdev;
- /* Microcode */
- struct firmware *ucode;
-};
+ struct mwl8k_device_info *device_info;
-struct mwl8k_priv {
void __iomem *sram;
void __iomem *regs;
- struct ieee80211_hw *hw;
- struct pci_dev *pdev;
+ /* firmware */
+ struct firmware *fw_helper;
+ struct firmware *fw_ucode;
- struct mwl8k_device_info *device_info;
+ /* hardware/firmware parameters */
bool ap_fw;
struct rxd_ops *rxd_ops;
-
- /* firmware files and meta data */
- struct mwl8k_firmware fw;
+ struct ieee80211_supported_band band_24;
+ struct ieee80211_channel channels_24[14];
+ struct ieee80211_rate rates_24[14];
+ struct ieee80211_supported_band band_50;
+ struct ieee80211_channel channels_50[4];
+ struct ieee80211_rate rates_50[9];
+ u32 ap_macids_supported;
+ u32 sta_macids_supported;
/* firmware access */
struct mutex fw_mutex;
@@ -161,9 +162,9 @@ struct mwl8k_priv {
/* TX quiesce completion, protected by fw_mutex and tx_lock */
struct completion *tx_wait;
- struct ieee80211_vif *vif;
-
- struct ieee80211_channel *current_channel;
+ /* List of interfaces. */
+ u32 macids_used;
+ struct list_head vif_list;
/* power management status cookie from firmware */
u32 *cookie;
@@ -182,16 +183,15 @@ struct mwl8k_priv {
struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES];
struct mwl8k_tx_queue txq[MWL8K_TX_QUEUES];
- /* PHY parameters */
- struct ieee80211_supported_band band;
- struct ieee80211_channel channels[14];
- struct ieee80211_rate rates[14];
-
bool radio_on;
bool radio_short_preamble;
bool sniffer_enabled;
bool wmm_enabled;
+ struct work_struct sta_notify_worker;
+ spinlock_t sta_notify_list_lock;
+ struct list_head sta_notify_list;
+
/* XXX need to convert this to handle multiple interfaces */
bool capture_beacon;
u8 capture_bssid[ETH_ALEN];
@@ -205,32 +205,33 @@ struct mwl8k_priv {
*/
struct work_struct finalize_join_worker;
- /* Tasklet to reclaim TX descriptors and buffers after tx */
- struct tasklet_struct tx_reclaim_task;
+ /* Tasklet to perform TX reclaim. */
+ struct tasklet_struct poll_tx_task;
+
+ /* Tasklet to perform RX. */
+ struct tasklet_struct poll_rx_task;
};
/* Per interface specific private data */
struct mwl8k_vif {
- /* backpointer to parent config block */
- struct mwl8k_priv *priv;
-
- /* BSS config of AP or IBSS from mac80211*/
- struct ieee80211_bss_conf bss_info;
-
- /* BSSID of AP or IBSS */
- u8 bssid[ETH_ALEN];
- u8 mac_addr[ETH_ALEN];
+ struct list_head list;
+ struct ieee80211_vif *vif;
- /* Index into station database.Returned by update_sta_db call */
- u8 peer_id;
+ /* Firmware macid for this vif. */
+ int macid;
- /* Non AMPDU sequence number assigned by driver */
- u16 seqno;
+ /* Non AMPDU sequence number assigned by driver. */
+ u16 seqno;
};
-
#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))
-static const struct ieee80211_channel mwl8k_channels[] = {
+struct mwl8k_sta {
+ /* Index into station database. Returned by UPDATE_STADB. */
+ u8 peer_id;
+};
+#define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
+
+static const struct ieee80211_channel mwl8k_channels_24[] = {
{ .center_freq = 2412, .hw_value = 1, },
{ .center_freq = 2417, .hw_value = 2, },
{ .center_freq = 2422, .hw_value = 3, },
@@ -242,9 +243,12 @@ static const struct ieee80211_channel mwl8k_channels[] = {
{ .center_freq = 2452, .hw_value = 9, },
{ .center_freq = 2457, .hw_value = 10, },
{ .center_freq = 2462, .hw_value = 11, },
+ { .center_freq = 2467, .hw_value = 12, },
+ { .center_freq = 2472, .hw_value = 13, },
+ { .center_freq = 2484, .hw_value = 14, },
};
-static const struct ieee80211_rate mwl8k_rates[] = {
+static const struct ieee80211_rate mwl8k_rates_24[] = {
{ .bitrate = 10, .hw_value = 2, },
{ .bitrate = 20, .hw_value = 4, },
{ .bitrate = 55, .hw_value = 11, },
@@ -261,8 +265,23 @@ static const struct ieee80211_rate mwl8k_rates[] = {
{ .bitrate = 720, .hw_value = 144, },
};
-static const u8 mwl8k_rateids[12] = {
- 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108,
+static const struct ieee80211_channel mwl8k_channels_50[] = {
+ { .center_freq = 5180, .hw_value = 36, },
+ { .center_freq = 5200, .hw_value = 40, },
+ { .center_freq = 5220, .hw_value = 44, },
+ { .center_freq = 5240, .hw_value = 48, },
+};
+
+static const struct ieee80211_rate mwl8k_rates_50[] = {
+ { .bitrate = 60, .hw_value = 12, },
+ { .bitrate = 90, .hw_value = 18, },
+ { .bitrate = 120, .hw_value = 24, },
+ { .bitrate = 180, .hw_value = 36, },
+ { .bitrate = 240, .hw_value = 48, },
+ { .bitrate = 360, .hw_value = 72, },
+ { .bitrate = 480, .hw_value = 96, },
+ { .bitrate = 540, .hw_value = 108, },
+ { .bitrate = 720, .hw_value = 144, },
};
/* Set or get info from Firmware */
@@ -278,6 +297,7 @@ static const u8 mwl8k_rateids[12] = {
#define MWL8K_CMD_RADIO_CONTROL 0x001c
#define MWL8K_CMD_RF_TX_POWER 0x001e
#define MWL8K_CMD_RF_ANTENNA 0x0020
+#define MWL8K_CMD_SET_BEACON 0x0100 /* per-vif */
#define MWL8K_CMD_SET_PRE_SCAN 0x0107
#define MWL8K_CMD_SET_POST_SCAN 0x0108
#define MWL8K_CMD_SET_RF_CHANNEL 0x010a
@@ -291,8 +311,10 @@ static const u8 mwl8k_rateids[12] = {
#define MWL8K_CMD_MIMO_CONFIG 0x0125
#define MWL8K_CMD_USE_FIXED_RATE 0x0126
#define MWL8K_CMD_ENABLE_SNIFFER 0x0150
-#define MWL8K_CMD_SET_MAC_ADDR 0x0202
+#define MWL8K_CMD_SET_MAC_ADDR 0x0202 /* per-vif */
#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
+#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */
+#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */
#define MWL8K_CMD_UPDATE_STADB 0x1123
static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
@@ -310,6 +332,7 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
MWL8K_CMDNAME(RADIO_CONTROL);
MWL8K_CMDNAME(RF_TX_POWER);
MWL8K_CMDNAME(RF_ANTENNA);
+ MWL8K_CMDNAME(SET_BEACON);
MWL8K_CMDNAME(SET_PRE_SCAN);
MWL8K_CMDNAME(SET_POST_SCAN);
MWL8K_CMDNAME(SET_RF_CHANNEL);
@@ -325,6 +348,8 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
MWL8K_CMDNAME(ENABLE_SNIFFER);
MWL8K_CMDNAME(SET_MAC_ADDR);
MWL8K_CMDNAME(SET_RATEADAPT_MODE);
+ MWL8K_CMDNAME(BSS_START);
+ MWL8K_CMDNAME(SET_NEW_STN);
MWL8K_CMDNAME(UPDATE_STADB);
default:
snprintf(buf, bufsize, "0x%x", cmd);
@@ -355,8 +380,8 @@ static void mwl8k_release_fw(struct firmware **fw)
static void mwl8k_release_firmware(struct mwl8k_priv *priv)
{
- mwl8k_release_fw(&priv->fw.ucode);
- mwl8k_release_fw(&priv->fw.helper);
+ mwl8k_release_fw(&priv->fw_ucode);
+ mwl8k_release_fw(&priv->fw_helper);
}
/* Request fw image */
@@ -377,7 +402,7 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv)
int rc;
if (di->helper_image != NULL) {
- rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw.helper);
+ rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw_helper);
if (rc) {
printk(KERN_ERR "%s: Error requesting helper "
"firmware file %s\n", pci_name(priv->pdev),
@@ -386,24 +411,22 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv)
}
}
- rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw.ucode);
+ rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw_ucode);
if (rc) {
printk(KERN_ERR "%s: Error requesting firmware file %s\n",
pci_name(priv->pdev), di->fw_image);
- mwl8k_release_fw(&priv->fw.helper);
+ mwl8k_release_fw(&priv->fw_helper);
return rc;
}
return 0;
}
-MODULE_FIRMWARE("mwl8k/helper_8687.fw");
-MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
-
struct mwl8k_cmd_pkt {
__le16 code;
__le16 length;
- __le16 seq_num;
+ __u8 seq_num;
+ __u8 macid;
__le16 result;
char payload[0];
} __attribute__((packed));
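Splitting the old 16-bit seq_num into an 8-bit sequence number plus an 8-bit macid lets each command be addressed to a specific firmware interface, matching the per-vif commands (SET_BEACON, SET_MAC_ADDR, BSS_START, SET_NEW_STN) flagged above. A hedged sketch of how a command might be routed; the helper below is illustrative only, not taken from this patch:

/* Illustrative helper (not in the patch): stamp a command with the
 * firmware macid of the interface it targets, or 0 for global ones. */
static void mwl8k_cmd_set_macid(struct mwl8k_cmd_pkt *cmd,
				struct ieee80211_vif *vif)
{
	cmd->macid = vif ? MWL8K_VIF(vif)->macid : 0;
}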
@@ -461,6 +484,7 @@ static int mwl8k_load_fw_image(struct mwl8k_priv *priv,
cmd->code = cpu_to_le16(MWL8K_CMD_CODE_DNLD);
cmd->seq_num = 0;
+ cmd->macid = 0;
cmd->result = 0;
done = 0;
@@ -551,13 +575,12 @@ static int mwl8k_feed_fw_image(struct mwl8k_priv *priv,
static int mwl8k_load_firmware(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
- struct firmware *fw = priv->fw.ucode;
- struct mwl8k_device_info *di = priv->device_info;
+ struct firmware *fw = priv->fw_ucode;
int rc;
int loops;
if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) {
- struct firmware *helper = priv->fw.helper;
+ struct firmware *helper = priv->fw_helper;
if (helper == NULL) {
printk(KERN_ERR "%s: helper image needed but none "
@@ -584,10 +607,7 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
return rc;
}
- if (di->modes & BIT(NL80211_IFTYPE_AP))
- iowrite32(MWL8K_MODE_AP, priv->regs + MWL8K_HIU_GEN_PTR);
- else
- iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
+ iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
loops = 500000;
do {
@@ -610,91 +630,6 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
}
-/*
- * Defines shared between transmission and reception.
- */
-/* HT control fields for firmware */
-struct ewc_ht_info {
- __le16 control1;
- __le16 control2;
- __le16 control3;
-} __attribute__((packed));
-
-/* Firmware Station database operations */
-#define MWL8K_STA_DB_ADD_ENTRY 0
-#define MWL8K_STA_DB_MODIFY_ENTRY 1
-#define MWL8K_STA_DB_DEL_ENTRY 2
-#define MWL8K_STA_DB_FLUSH 3
-
-/* Peer Entry flags - used to define the type of the peer node */
-#define MWL8K_PEER_TYPE_ACCESSPOINT 2
-
-struct peer_capability_info {
- /* Peer type - AP vs. STA. */
- __u8 peer_type;
-
- /* Basic 802.11 capabilities from assoc resp. */
- __le16 basic_caps;
-
- /* Set if peer supports 802.11n high throughput (HT). */
- __u8 ht_support;
-
- /* Valid if HT is supported. */
- __le16 ht_caps;
- __u8 extended_ht_caps;
- struct ewc_ht_info ewc_info;
-
- /* Legacy rate table. Intersection of our rates and peer rates. */
- __u8 legacy_rates[12];
-
- /* HT rate table. Intersection of our rates and peer rates. */
- __u8 ht_rates[16];
- __u8 pad[16];
-
- /* If set, interoperability mode, no proprietary extensions. */
- __u8 interop;
- __u8 pad2;
- __u8 station_id;
- __le16 amsdu_enabled;
-} __attribute__((packed));
-
-/* Inline functions to manipulate QoS field in data descriptor. */
-static inline u16 mwl8k_qos_setbit_eosp(u16 qos)
-{
- u16 val_mask = 1 << 4;
-
- /* End of Service Period Bit 4 */
- return qos | val_mask;
-}
-
-static inline u16 mwl8k_qos_setbit_ack(u16 qos, u8 ack_policy)
-{
- u16 val_mask = 0x3;
- u8 shift = 5;
- u16 qos_mask = ~(val_mask << shift);
-
- /* Ack Policy Bit 5-6 */
- return (qos & qos_mask) | ((ack_policy & val_mask) << shift);
-}
-
-static inline u16 mwl8k_qos_setbit_amsdu(u16 qos)
-{
- u16 val_mask = 1 << 7;
-
- /* AMSDU present Bit 7 */
- return qos | val_mask;
-}
-
-static inline u16 mwl8k_qos_setbit_qlen(u16 qos, u8 len)
-{
- u16 val_mask = 0xff;
- u8 shift = 8;
- u16 qos_mask = ~(val_mask << shift);
-
- /* Queue Length Bits 8-15 */
- return (qos & qos_mask) | ((len & val_mask) << shift);
-}
-
/* DMA header used by firmware and hardware. */
struct mwl8k_dma_data {
__le16 fwlen;
@@ -761,9 +696,9 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
/*
- * Packet reception for 88w8366.
+ * Packet reception for 88w8366 AP firmware.
*/
-struct mwl8k_rxd_8366 {
+struct mwl8k_rxd_8366_ap {
__le16 pkt_len;
__u8 sq2;
__u8 rate;
@@ -781,23 +716,23 @@ struct mwl8k_rxd_8366 {
__u8 rx_ctrl;
} __attribute__((packed));
-#define MWL8K_8366_RATE_INFO_MCS_FORMAT 0x80
-#define MWL8K_8366_RATE_INFO_40MHZ 0x40
-#define MWL8K_8366_RATE_INFO_RATEID(x) ((x) & 0x3f)
+#define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT 0x80
+#define MWL8K_8366_AP_RATE_INFO_40MHZ 0x40
+#define MWL8K_8366_AP_RATE_INFO_RATEID(x) ((x) & 0x3f)
-#define MWL8K_8366_RX_CTRL_OWNED_BY_HOST 0x80
+#define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80
-static void mwl8k_rxd_8366_init(void *_rxd, dma_addr_t next_dma_addr)
+static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr)
{
- struct mwl8k_rxd_8366 *rxd = _rxd;
+ struct mwl8k_rxd_8366_ap *rxd = _rxd;
rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
- rxd->rx_ctrl = MWL8K_8366_RX_CTRL_OWNED_BY_HOST;
+ rxd->rx_ctrl = MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST;
}
-static void mwl8k_rxd_8366_refill(void *_rxd, dma_addr_t addr, int len)
+static void mwl8k_rxd_8366_ap_refill(void *_rxd, dma_addr_t addr, int len)
{
- struct mwl8k_rxd_8366 *rxd = _rxd;
+ struct mwl8k_rxd_8366_ap *rxd = _rxd;
rxd->pkt_len = cpu_to_le16(len);
rxd->pkt_phys_addr = cpu_to_le32(addr);
@@ -806,12 +741,12 @@ static void mwl8k_rxd_8366_refill(void *_rxd, dma_addr_t addr, int len)
}
static int
-mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status,
- __le16 *qos)
+mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
+ __le16 *qos)
{
- struct mwl8k_rxd_8366 *rxd = _rxd;
+ struct mwl8k_rxd_8366_ap *rxd = _rxd;
- if (!(rxd->rx_ctrl & MWL8K_8366_RX_CTRL_OWNED_BY_HOST))
+ if (!(rxd->rx_ctrl & MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST))
return -1;
rmb();
@@ -820,23 +755,29 @@ mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status,
status->signal = -rxd->rssi;
status->noise = -rxd->noise_floor;
- if (rxd->rate & MWL8K_8366_RATE_INFO_MCS_FORMAT) {
+ if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) {
status->flag |= RX_FLAG_HT;
- if (rxd->rate & MWL8K_8366_RATE_INFO_40MHZ)
+ if (rxd->rate & MWL8K_8366_AP_RATE_INFO_40MHZ)
status->flag |= RX_FLAG_40MHZ;
- status->rate_idx = MWL8K_8366_RATE_INFO_RATEID(rxd->rate);
+ status->rate_idx = MWL8K_8366_AP_RATE_INFO_RATEID(rxd->rate);
} else {
int i;
- for (i = 0; i < ARRAY_SIZE(mwl8k_rates); i++) {
- if (mwl8k_rates[i].hw_value == rxd->rate) {
+ for (i = 0; i < ARRAY_SIZE(mwl8k_rates_24); i++) {
+ if (mwl8k_rates_24[i].hw_value == rxd->rate) {
status->rate_idx = i;
break;
}
}
}
- status->band = IEEE80211_BAND_2GHZ;
+ if (rxd->channel > 14) {
+ status->band = IEEE80211_BAND_5GHZ;
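+ /* The 5 GHz band table omits the five 2.4 GHz-only legacy
+ * rates, so shift non-HT rate indices down to match. */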
+ if (!(status->flag & RX_FLAG_HT))
+ status->rate_idx -= 5;
+ } else {
+ status->band = IEEE80211_BAND_2GHZ;
+ }
status->freq = ieee80211_channel_to_frequency(rxd->channel);
*qos = rxd->qos_control;
@@ -844,17 +785,17 @@ mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status,
return le16_to_cpu(rxd->pkt_len);
}
-static struct rxd_ops rxd_8366_ops = {
- .rxd_size = sizeof(struct mwl8k_rxd_8366),
- .rxd_init = mwl8k_rxd_8366_init,
- .rxd_refill = mwl8k_rxd_8366_refill,
- .rxd_process = mwl8k_rxd_8366_process,
+static struct rxd_ops rxd_8366_ap_ops = {
+ .rxd_size = sizeof(struct mwl8k_rxd_8366_ap),
+ .rxd_init = mwl8k_rxd_8366_ap_init,
+ .rxd_refill = mwl8k_rxd_8366_ap_refill,
+ .rxd_process = mwl8k_rxd_8366_ap_process,
};
/*
- * Packet reception for 88w8687.
+ * Packet reception for STA firmware.
*/
-struct mwl8k_rxd_8687 {
+struct mwl8k_rxd_sta {
__le16 pkt_len;
__u8 link_quality;
__u8 noise_level;
@@ -871,26 +812,26 @@ struct mwl8k_rxd_8687 {
__u8 pad2[2];
} __attribute__((packed));
-#define MWL8K_8687_RATE_INFO_SHORTPRE 0x8000
-#define MWL8K_8687_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3)
-#define MWL8K_8687_RATE_INFO_RATEID(x) (((x) >> 3) & 0x3f)
-#define MWL8K_8687_RATE_INFO_40MHZ 0x0004
-#define MWL8K_8687_RATE_INFO_SHORTGI 0x0002
-#define MWL8K_8687_RATE_INFO_MCS_FORMAT 0x0001
+#define MWL8K_STA_RATE_INFO_SHORTPRE 0x8000
+#define MWL8K_STA_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3)
+#define MWL8K_STA_RATE_INFO_RATEID(x) (((x) >> 3) & 0x3f)
+#define MWL8K_STA_RATE_INFO_40MHZ 0x0004
+#define MWL8K_STA_RATE_INFO_SHORTGI 0x0002
+#define MWL8K_STA_RATE_INFO_MCS_FORMAT 0x0001
-#define MWL8K_8687_RX_CTRL_OWNED_BY_HOST 0x02
+#define MWL8K_STA_RX_CTRL_OWNED_BY_HOST 0x02
-static void mwl8k_rxd_8687_init(void *_rxd, dma_addr_t next_dma_addr)
+static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr)
{
- struct mwl8k_rxd_8687 *rxd = _rxd;
+ struct mwl8k_rxd_sta *rxd = _rxd;
rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
- rxd->rx_ctrl = MWL8K_8687_RX_CTRL_OWNED_BY_HOST;
+ rxd->rx_ctrl = MWL8K_STA_RX_CTRL_OWNED_BY_HOST;
}
-static void mwl8k_rxd_8687_refill(void *_rxd, dma_addr_t addr, int len)
+static void mwl8k_rxd_sta_refill(void *_rxd, dma_addr_t addr, int len)
{
- struct mwl8k_rxd_8687 *rxd = _rxd;
+ struct mwl8k_rxd_sta *rxd = _rxd;
rxd->pkt_len = cpu_to_le16(len);
rxd->pkt_phys_addr = cpu_to_le32(addr);
@@ -899,13 +840,13 @@ static void mwl8k_rxd_8687_refill(void *_rxd, dma_addr_t addr, int len)
}
static int
-mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status,
+mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
__le16 *qos)
{
- struct mwl8k_rxd_8687 *rxd = _rxd;
+ struct mwl8k_rxd_sta *rxd = _rxd;
u16 rate_info;
- if (!(rxd->rx_ctrl & MWL8K_8687_RX_CTRL_OWNED_BY_HOST))
+ if (!(rxd->rx_ctrl & MWL8K_STA_RX_CTRL_OWNED_BY_HOST))
return -1;
rmb();
@@ -915,19 +856,25 @@ mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status,
status->signal = -rxd->rssi;
status->noise = -rxd->noise_level;
- status->antenna = MWL8K_8687_RATE_INFO_ANTSELECT(rate_info);
- status->rate_idx = MWL8K_8687_RATE_INFO_RATEID(rate_info);
+ status->antenna = MWL8K_STA_RATE_INFO_ANTSELECT(rate_info);
+ status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info);
- if (rate_info & MWL8K_8687_RATE_INFO_SHORTPRE)
+ if (rate_info & MWL8K_STA_RATE_INFO_SHORTPRE)
status->flag |= RX_FLAG_SHORTPRE;
- if (rate_info & MWL8K_8687_RATE_INFO_40MHZ)
+ if (rate_info & MWL8K_STA_RATE_INFO_40MHZ)
status->flag |= RX_FLAG_40MHZ;
- if (rate_info & MWL8K_8687_RATE_INFO_SHORTGI)
+ if (rate_info & MWL8K_STA_RATE_INFO_SHORTGI)
status->flag |= RX_FLAG_SHORT_GI;
- if (rate_info & MWL8K_8687_RATE_INFO_MCS_FORMAT)
+ if (rate_info & MWL8K_STA_RATE_INFO_MCS_FORMAT)
status->flag |= RX_FLAG_HT;
- status->band = IEEE80211_BAND_2GHZ;
+ if (rxd->channel > 14) {
+ status->band = IEEE80211_BAND_5GHZ;
+ if (!(status->flag & RX_FLAG_HT))
+ status->rate_idx -= 5;
+ } else {
+ status->band = IEEE80211_BAND_2GHZ;
+ }
status->freq = ieee80211_channel_to_frequency(rxd->channel);
*qos = rxd->qos_control;
@@ -935,11 +882,11 @@ mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status,
return le16_to_cpu(rxd->pkt_len);
}
-static struct rxd_ops rxd_8687_ops = {
- .rxd_size = sizeof(struct mwl8k_rxd_8687),
- .rxd_init = mwl8k_rxd_8687_init,
- .rxd_refill = mwl8k_rxd_8687_refill,
- .rxd_process = mwl8k_rxd_8687_process,
+static struct rxd_ops rxd_sta_ops = {
+ .rxd_size = sizeof(struct mwl8k_rxd_sta),
+ .rxd_init = mwl8k_rxd_sta_init,
+ .rxd_refill = mwl8k_rxd_sta_refill,
+ .rxd_process = mwl8k_rxd_sta_process,
};
@@ -1153,16 +1100,18 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
* Packet transmission.
*/
-/* Transmit packet ACK policy */
-#define MWL8K_TXD_ACK_POLICY_NORMAL 0
-#define MWL8K_TXD_ACK_POLICY_BLOCKACK 3
-
#define MWL8K_TXD_STATUS_OK 0x00000001
#define MWL8K_TXD_STATUS_OK_RETRY 0x00000002
#define MWL8K_TXD_STATUS_OK_MORE_RETRY 0x00000004
#define MWL8K_TXD_STATUS_MULTICAST_TX 0x00000008
#define MWL8K_TXD_STATUS_FW_OWNED 0x80000000
+#define MWL8K_QOS_QLEN_UNSPEC 0xff00
+#define MWL8K_QOS_ACK_POLICY_MASK 0x0060
+#define MWL8K_QOS_ACK_POLICY_NORMAL 0x0000
+#define MWL8K_QOS_ACK_POLICY_BLOCKACK 0x0060
+#define MWL8K_QOS_EOSP 0x0010
+
struct mwl8k_tx_desc {
__le32 status;
__u8 data_rate;
@@ -1187,8 +1136,7 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
int size;
int i;
- memset(&txq->stats, 0, sizeof(struct ieee80211_tx_queue_stats));
- txq->stats.limit = MWL8K_TX_DESCS;
+ txq->len = 0;
txq->head = 0;
txq->tail = 0;
@@ -1264,7 +1212,7 @@ static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw)
printk(KERN_ERR "%s: txq[%d] len=%d head=%d tail=%d "
"fw_owned=%d drv_owned=%d unused=%d\n",
wiphy_name(hw->wiphy), i,
- txq->stats.len, txq->head, txq->tail,
+ txq->len, txq->head, txq->tail,
fw_owned, drv_owned, unused);
}
}
@@ -1272,7 +1220,7 @@ static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw)
/*
* Must be called with priv->fw_mutex held and tx queues stopped.
*/
-#define MWL8K_TX_WAIT_TIMEOUT_MS 1000
+#define MWL8K_TX_WAIT_TIMEOUT_MS 5000
static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
{
@@ -1316,8 +1264,8 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
}
if (priv->pending_tx_pkts < oldcount) {
- printk(KERN_NOTICE "%s: timeout waiting for tx "
- "rings to drain (%d -> %d pkts), retrying\n",
+ printk(KERN_NOTICE "%s: waiting for tx rings "
+ "to drain (%d -> %d pkts)\n",
wiphy_name(hw->wiphy), oldcount,
priv->pending_tx_pkts);
retry = 1;
@@ -1342,13 +1290,15 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
MWL8K_TXD_STATUS_OK_RETRY | \
MWL8K_TXD_STATUS_OK_MORE_RETRY))
-static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
+static int
+mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_tx_queue *txq = priv->txq + index;
- int wake = 0;
+ int processed;
- while (txq->stats.len > 0) {
+ processed = 0;
+ while (txq->len > 0 && limit--) {
int tx;
struct mwl8k_tx_desc *tx_desc;
unsigned long addr;
@@ -1370,8 +1320,8 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
}
txq->head = (tx + 1) % MWL8K_TX_DESCS;
- BUG_ON(txq->stats.len == 0);
- txq->stats.len--;
+ BUG_ON(txq->len == 0);
+ txq->len--;
priv->pending_tx_pkts--;
addr = le32_to_cpu(tx_desc->pkt_phys_addr);
@@ -1395,11 +1345,13 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
ieee80211_tx_status_irqsafe(hw, skb);
- wake = 1;
+ processed++;
}
- if (wake && priv->radio_on && !mutex_is_locked(&priv->fw_mutex))
+ if (processed && priv->radio_on && !mutex_is_locked(&priv->fw_mutex))
ieee80211_wake_queue(hw, index);
+
+ return processed;
}
/* must be called only when the card's transmit is completely halted */
@@ -1408,7 +1360,7 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_tx_queue *txq = priv->txq + index;
- mwl8k_txq_reclaim(hw, index, 1);
+ mwl8k_txq_reclaim(hw, index, INT_MAX, 1);
kfree(txq->skb);
txq->skb = NULL;
@@ -1446,11 +1398,9 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
mwl8k_vif = MWL8K_VIF(tx_info->control.vif);
if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
- u16 seqno = mwl8k_vif->seqno;
-
wh->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
- wh->seq_ctrl |= cpu_to_le16(seqno << 4);
- mwl8k_vif->seqno = seqno++ % 4096;
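+ /* seq_ctrl carries the sequence number in bits 4-15, so
+ * adding 0x10 below advances it by one frame. */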
+ wh->seq_ctrl |= cpu_to_le16(mwl8k_vif->seqno);
+ mwl8k_vif->seqno += 0x10;
}
/* Setup firmware control bit fields for each frame type. */
@@ -1459,24 +1409,17 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
if (ieee80211_is_mgmt(wh->frame_control) ||
ieee80211_is_ctl(wh->frame_control)) {
txdatarate = 0;
- qos = mwl8k_qos_setbit_eosp(qos);
- /* Set Queue size to unspecified */
- qos = mwl8k_qos_setbit_qlen(qos, 0xff);
+ qos |= MWL8K_QOS_QLEN_UNSPEC | MWL8K_QOS_EOSP;
} else if (ieee80211_is_data(wh->frame_control)) {
txdatarate = 1;
if (is_multicast_ether_addr(wh->addr1))
txstatus |= MWL8K_TXD_STATUS_MULTICAST_TX;
- /* Send pkt in an aggregate if AMPDU frame. */
+ qos &= ~MWL8K_QOS_ACK_POLICY_MASK;
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
- qos = mwl8k_qos_setbit_ack(qos,
- MWL8K_TXD_ACK_POLICY_BLOCKACK);
+ qos |= MWL8K_QOS_ACK_POLICY_BLOCKACK;
else
- qos = mwl8k_qos_setbit_ack(qos,
- MWL8K_TXD_ACK_POLICY_NORMAL);
-
- if (qos & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
- qos = mwl8k_qos_setbit_amsdu(qos);
+ qos |= MWL8K_QOS_ACK_POLICY_NORMAL;
}
dma = pci_map_single(priv->pdev, skb->data,
@@ -1503,12 +1446,14 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
tx->pkt_phys_addr = cpu_to_le32(dma);
tx->pkt_len = cpu_to_le16(skb->len);
tx->rate_info = 0;
- tx->peer_id = mwl8k_vif->peer_id;
+ if (!priv->ap_fw && tx_info->control.sta != NULL)
+ tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id;
+ else
+ tx->peer_id = 0;
wmb();
tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus);
- txq->stats.count++;
- txq->stats.len++;
+ txq->len++;
priv->pending_tx_pkts++;
txq->tail++;
@@ -1656,6 +1601,56 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
return rc;
}
+static int mwl8k_post_pervif_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct mwl8k_cmd_pkt *cmd)
+{
+ if (vif != NULL)
+ cmd->macid = MWL8K_VIF(vif)->macid;
+ return mwl8k_post_cmd(hw, cmd);
+}
+
+/*
+ * Setup code shared between STA and AP firmware images.
+ */
+static void mwl8k_setup_2ghz_band(struct ieee80211_hw *hw)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ BUILD_BUG_ON(sizeof(priv->channels_24) != sizeof(mwl8k_channels_24));
+ memcpy(priv->channels_24, mwl8k_channels_24, sizeof(mwl8k_channels_24));
+
+ BUILD_BUG_ON(sizeof(priv->rates_24) != sizeof(mwl8k_rates_24));
+ memcpy(priv->rates_24, mwl8k_rates_24, sizeof(mwl8k_rates_24));
+
+ priv->band_24.band = IEEE80211_BAND_2GHZ;
+ priv->band_24.channels = priv->channels_24;
+ priv->band_24.n_channels = ARRAY_SIZE(mwl8k_channels_24);
+ priv->band_24.bitrates = priv->rates_24;
+ priv->band_24.n_bitrates = ARRAY_SIZE(mwl8k_rates_24);
+
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band_24;
+}
+
+static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ BUILD_BUG_ON(sizeof(priv->channels_50) != sizeof(mwl8k_channels_50));
+ memcpy(priv->channels_50, mwl8k_channels_50, sizeof(mwl8k_channels_50));
+
+ BUILD_BUG_ON(sizeof(priv->rates_50) != sizeof(mwl8k_rates_50));
+ memcpy(priv->rates_50, mwl8k_rates_50, sizeof(mwl8k_rates_50));
+
+ priv->band_50.band = IEEE80211_BAND_5GHZ;
+ priv->band_50.channels = priv->channels_50;
+ priv->band_50.n_channels = ARRAY_SIZE(mwl8k_channels_50);
+ priv->band_50.bitrates = priv->rates_50;
+ priv->band_50.n_bitrates = ARRAY_SIZE(mwl8k_rates_50);
+
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->band_50;
+}
+
/*
* CMD_GET_HW_SPEC (STA version).
*/
@@ -1678,6 +1673,89 @@ struct mwl8k_cmd_get_hw_spec_sta {
__le32 total_rxd;
} __attribute__((packed));
+#define MWL8K_CAP_MAX_AMSDU 0x20000000
+#define MWL8K_CAP_GREENFIELD 0x08000000
+#define MWL8K_CAP_AMPDU 0x04000000
+#define MWL8K_CAP_RX_STBC 0x01000000
+#define MWL8K_CAP_TX_STBC 0x00800000
+#define MWL8K_CAP_SHORTGI_40MHZ 0x00400000
+#define MWL8K_CAP_SHORTGI_20MHZ 0x00200000
+#define MWL8K_CAP_RX_ANTENNA_MASK 0x000e0000
+#define MWL8K_CAP_TX_ANTENNA_MASK 0x0001c000
+#define MWL8K_CAP_DELAY_BA 0x00003000
+#define MWL8K_CAP_MIMO 0x00000200
+#define MWL8K_CAP_40MHZ 0x00000100
+#define MWL8K_CAP_BAND_MASK 0x00000007
+#define MWL8K_CAP_5GHZ 0x00000004
+#define MWL8K_CAP_2GHZ4 0x00000001
+
+static void
+mwl8k_set_ht_caps(struct ieee80211_hw *hw,
+ struct ieee80211_supported_band *band, u32 cap)
+{
+ int rx_streams;
+ int tx_streams;
+
+ band->ht_cap.ht_supported = 1;
+
+ if (cap & MWL8K_CAP_MAX_AMSDU)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+ if (cap & MWL8K_CAP_GREENFIELD)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_GRN_FLD;
+ if (cap & MWL8K_CAP_AMPDU) {
+ hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+ band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
+ }
+ if (cap & MWL8K_CAP_RX_STBC)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_RX_STBC;
+ if (cap & MWL8K_CAP_TX_STBC)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
+ if (cap & MWL8K_CAP_SHORTGI_40MHZ)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+ if (cap & MWL8K_CAP_SHORTGI_20MHZ)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+ if (cap & MWL8K_CAP_DELAY_BA)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_DELAY_BA;
+ if (cap & MWL8K_CAP_40MHZ)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
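+ /* The number of set bits in the rx/tx antenna masks is
+ * used as the number of supported spatial streams. */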
+ rx_streams = hweight32(cap & MWL8K_CAP_RX_ANTENNA_MASK);
+ tx_streams = hweight32(cap & MWL8K_CAP_TX_ANTENNA_MASK);
+
+ band->ht_cap.mcs.rx_mask[0] = 0xff;
+ if (rx_streams >= 2)
+ band->ht_cap.mcs.rx_mask[1] = 0xff;
+ if (rx_streams >= 3)
+ band->ht_cap.mcs.rx_mask[2] = 0xff;
+ band->ht_cap.mcs.rx_mask[4] = 0x01;
+ band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+
+ if (rx_streams != tx_streams) {
+ band->ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ band->ht_cap.mcs.tx_params |= (tx_streams - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+ }
+}
+
+static void
+mwl8k_set_caps(struct ieee80211_hw *hw, u32 caps)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ if ((caps & MWL8K_CAP_2GHZ4) || !(caps & MWL8K_CAP_BAND_MASK)) {
+ mwl8k_setup_2ghz_band(hw);
+ if (caps & MWL8K_CAP_MIMO)
+ mwl8k_set_ht_caps(hw, &priv->band_24, caps);
+ }
+
+ if (caps & MWL8K_CAP_5GHZ) {
+ mwl8k_setup_5ghz_band(hw);
+ if (caps & MWL8K_CAP_MIMO)
+ mwl8k_set_ht_caps(hw, &priv->band_50, caps);
+ }
+}
+
static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
@@ -1708,6 +1786,9 @@ static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
priv->fw_rev = le32_to_cpu(cmd->fw_rev);
priv->hw_rev = cmd->hw_rev;
+ mwl8k_set_caps(hw, le32_to_cpu(cmd->caps));
+ priv->ap_macids_supported = 0x00000000;
+ priv->sta_macids_supported = 0x00000001;
}
kfree(cmd);
@@ -1761,6 +1842,9 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
priv->fw_rev = le32_to_cpu(cmd->fw_rev);
priv->hw_rev = cmd->hw_rev;
+ mwl8k_setup_2ghz_band(hw);
+ priv->ap_macids_supported = 0x000000ff;
+ priv->sta_macids_supported = 0x00000000;
off = le32_to_cpu(cmd->wcbbase0) & 0xffff;
iowrite32(cpu_to_le32(priv->txq[0].txd_dma), priv->sram + off);
@@ -1806,7 +1890,9 @@ struct mwl8k_cmd_set_hw_spec {
__le32 total_rxd;
} __attribute__((packed));
-#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
+#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
+#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020
+#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010
static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
{
@@ -1827,7 +1913,9 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
for (i = 0; i < MWL8K_TX_QUEUES; i++)
cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
- cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT);
+ cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
+ MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
+ MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON);
cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
@@ -1897,9 +1985,9 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
}
/*
- * CMD_802_11_GET_STAT.
+ * CMD_GET_STAT.
*/
-struct mwl8k_cmd_802_11_get_stat {
+struct mwl8k_cmd_get_stat {
struct mwl8k_cmd_pkt header;
__le32 stats[64];
} __attribute__((packed));
@@ -1909,10 +1997,10 @@ struct mwl8k_cmd_802_11_get_stat {
#define MWL8K_STAT_FCS_ERROR 24
#define MWL8K_STAT_RTS_SUCCESS 11
-static int mwl8k_cmd_802_11_get_stat(struct ieee80211_hw *hw,
- struct ieee80211_low_level_stats *stats)
+static int mwl8k_cmd_get_stat(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
{
- struct mwl8k_cmd_802_11_get_stat *cmd;
+ struct mwl8k_cmd_get_stat *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1939,9 +2027,9 @@ static int mwl8k_cmd_802_11_get_stat(struct ieee80211_hw *hw,
}
/*
- * CMD_802_11_RADIO_CONTROL.
+ * CMD_RADIO_CONTROL.
*/
-struct mwl8k_cmd_802_11_radio_control {
+struct mwl8k_cmd_radio_control {
struct mwl8k_cmd_pkt header;
__le16 action;
__le16 control;
@@ -1949,10 +2037,10 @@ struct mwl8k_cmd_802_11_radio_control {
} __attribute__((packed));
static int
-mwl8k_cmd_802_11_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
+mwl8k_cmd_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
{
struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_cmd_802_11_radio_control *cmd;
+ struct mwl8k_cmd_radio_control *cmd;
int rc;
if (enable == priv->radio_on && !force)
@@ -1977,36 +2065,32 @@ mwl8k_cmd_802_11_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
return rc;
}
-static int mwl8k_cmd_802_11_radio_disable(struct ieee80211_hw *hw)
+static int mwl8k_cmd_radio_disable(struct ieee80211_hw *hw)
{
- return mwl8k_cmd_802_11_radio_control(hw, 0, 0);
+ return mwl8k_cmd_radio_control(hw, 0, 0);
}
-static int mwl8k_cmd_802_11_radio_enable(struct ieee80211_hw *hw)
+static int mwl8k_cmd_radio_enable(struct ieee80211_hw *hw)
{
- return mwl8k_cmd_802_11_radio_control(hw, 1, 0);
+ return mwl8k_cmd_radio_control(hw, 1, 0);
}
static int
mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble)
{
- struct mwl8k_priv *priv;
-
- if (hw == NULL || hw->priv == NULL)
- return -EINVAL;
- priv = hw->priv;
+ struct mwl8k_priv *priv = hw->priv;
priv->radio_short_preamble = short_preamble;
- return mwl8k_cmd_802_11_radio_control(hw, 1, 1);
+ return mwl8k_cmd_radio_control(hw, 1, 1);
}
/*
- * CMD_802_11_RF_TX_POWER.
+ * CMD_RF_TX_POWER.
*/
#define MWL8K_TX_POWER_LEVEL_TOTAL 8
-struct mwl8k_cmd_802_11_rf_tx_power {
+struct mwl8k_cmd_rf_tx_power {
struct mwl8k_cmd_pkt header;
__le16 action;
__le16 support_level;
@@ -2015,9 +2099,9 @@ struct mwl8k_cmd_802_11_rf_tx_power {
__le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL];
} __attribute__((packed));
-static int mwl8k_cmd_802_11_rf_tx_power(struct ieee80211_hw *hw, int dBm)
+static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
{
- struct mwl8k_cmd_802_11_rf_tx_power *cmd;
+ struct mwl8k_cmd_rf_tx_power *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -2069,6 +2153,36 @@ mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask)
}
/*
+ * CMD_SET_BEACON.
+ */
+struct mwl8k_cmd_set_beacon {
+ struct mwl8k_cmd_pkt header;
+ __le16 beacon_len;
+ __u8 beacon[0];
+};
+
+static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *beacon, int len)
+{
+ struct mwl8k_cmd_set_beacon *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd) + len, GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_BEACON);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd) + len);
+ cmd->beacon_len = cpu_to_le16(len);
+ memcpy(cmd->beacon, beacon, len);
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
* CMD_SET_PRE_SCAN.
*/
struct mwl8k_cmd_set_pre_scan {
@@ -2103,7 +2217,7 @@ struct mwl8k_cmd_set_post_scan {
} __attribute__((packed));
static int
-mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, __u8 *mac)
+mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac)
{
struct mwl8k_cmd_set_post_scan *cmd;
int rc;
@@ -2134,8 +2248,9 @@ struct mwl8k_cmd_set_rf_channel {
} __attribute__((packed));
static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
- struct ieee80211_channel *channel)
+ struct ieee80211_conf *conf)
{
+ struct ieee80211_channel *channel = conf->channel;
struct mwl8k_cmd_set_rf_channel *cmd;
int rc;
@@ -2147,10 +2262,19 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(MWL8K_CMD_SET);
cmd->current_channel = channel->hw_value;
+
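+ /* The low channel_flags bits select the band; the bits set
+ * below encode the channel width and the placement of the
+ * 40 MHz extension channel. */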
if (channel->band == IEEE80211_BAND_2GHZ)
- cmd->channel_flags = cpu_to_le32(0x00000081);
- else
- cmd->channel_flags = cpu_to_le32(0x00000000);
+ cmd->channel_flags |= cpu_to_le32(0x00000001);
+ else if (channel->band == IEEE80211_BAND_5GHZ)
+ cmd->channel_flags |= cpu_to_le32(0x00000004);
+
+ if (conf->channel_type == NL80211_CHAN_NO_HT ||
+ conf->channel_type == NL80211_CHAN_HT20)
+ cmd->channel_flags |= cpu_to_le32(0x00000080);
+ else if (conf->channel_type == NL80211_CHAN_HT40MINUS)
+ cmd->channel_flags |= cpu_to_le32(0x000001900);
+ else if (conf->channel_type == NL80211_CHAN_HT40PLUS)
+ cmd->channel_flags |= cpu_to_le32(0x000000900);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2159,85 +2283,75 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
}
/*
- * CMD_SET_SLOT.
+ * CMD_SET_AID.
*/
-struct mwl8k_cmd_set_slot {
- struct mwl8k_cmd_pkt header;
- __le16 action;
- __u8 short_slot;
-} __attribute__((packed));
-
-static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
-{
- struct mwl8k_cmd_set_slot *cmd;
- int rc;
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (cmd == NULL)
- return -ENOMEM;
-
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_SLOT);
- cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le16(MWL8K_CMD_SET);
- cmd->short_slot = short_slot_time;
-
- rc = mwl8k_post_cmd(hw, &cmd->header);
- kfree(cmd);
+#define MWL8K_FRAME_PROT_DISABLED 0x00
+#define MWL8K_FRAME_PROT_11G 0x07
+#define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY 0x02
+#define MWL8K_FRAME_PROT_11N_HT_ALL 0x06
- return rc;
-}
+struct mwl8k_cmd_update_set_aid {
+ struct mwl8k_cmd_pkt header;
+ __le16 aid;
-/*
- * CMD_MIMO_CONFIG.
- */
-struct mwl8k_cmd_mimo_config {
- struct mwl8k_cmd_pkt header;
- __le32 action;
- __u8 rx_antenna_map;
- __u8 tx_antenna_map;
+ /* AP's MAC address (BSSID) */
+ __u8 bssid[ETH_ALEN];
+ __le16 protection_mode;
+ __u8 supp_rates[14];
} __attribute__((packed));
-static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
+static void legacy_rate_mask_to_array(u8 *rates, u32 mask)
{
- struct mwl8k_cmd_mimo_config *cmd;
- int rc;
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (cmd == NULL)
- return -ENOMEM;
-
- cmd->header.code = cpu_to_le16(MWL8K_CMD_MIMO_CONFIG);
- cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le32((u32)MWL8K_CMD_SET);
- cmd->rx_antenna_map = rx;
- cmd->tx_antenna_map = tx;
+ int i;
+ int j;
- rc = mwl8k_post_cmd(hw, &cmd->header);
- kfree(cmd);
+ /*
+ * Clear nonstandard rates 4 and 13.
+ */
+ mask &= 0x1fef;
- return rc;
+ for (i = 0, j = 0; i < 14; i++) {
+ if (mask & (1 << i))
+ rates[j++] = mwl8k_rates_24[i].hw_value;
+ }
}
-/*
- * CMD_ENABLE_SNIFFER.
- */
-struct mwl8k_cmd_enable_sniffer {
- struct mwl8k_cmd_pkt header;
- __le32 action;
-} __attribute__((packed));
-
-static int mwl8k_enable_sniffer(struct ieee80211_hw *hw, bool enable)
+static int
+mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u32 legacy_rate_mask)
{
- struct mwl8k_cmd_enable_sniffer *cmd;
+ struct mwl8k_cmd_update_set_aid *cmd;
+ u16 prot_mode;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_ENABLE_SNIFFER);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le32(!!enable);
+ cmd->aid = cpu_to_le16(vif->bss_conf.aid);
+ memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
+
+ if (vif->bss_conf.use_cts_prot) {
+ prot_mode = MWL8K_FRAME_PROT_11G;
+ } else {
+ switch (vif->bss_conf.ht_operation_mode &
+ IEEE80211_HT_OP_MODE_PROTECTION) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY;
+ break;
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ prot_mode = MWL8K_FRAME_PROT_11N_HT_ALL;
+ break;
+ default:
+ prot_mode = MWL8K_FRAME_PROT_DISABLED;
+ break;
+ }
+ }
+ cmd->protection_mode = cpu_to_le16(prot_mode);
+
+ legacy_rate_mask_to_array(cmd->supp_rates, legacy_rate_mask);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2246,37 +2360,32 @@ static int mwl8k_enable_sniffer(struct ieee80211_hw *hw, bool enable)
}
/*
- * CMD_SET_MAC_ADDR.
+ * CMD_SET_RATE.
*/
-struct mwl8k_cmd_set_mac_addr {
- struct mwl8k_cmd_pkt header;
- union {
- struct {
- __le16 mac_type;
- __u8 mac_addr[ETH_ALEN];
- } mbss;
- __u8 mac_addr[ETH_ALEN];
- };
+struct mwl8k_cmd_set_rate {
+ struct mwl8k_cmd_pkt header;
+ __u8 legacy_rates[14];
+
+ /* Bitmap for supported MCS codes. */
+ __u8 mcs_set[16];
+ __u8 reserved[16];
} __attribute__((packed));
-static int mwl8k_set_mac_addr(struct ieee80211_hw *hw, u8 *mac)
+static int
+mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 legacy_rate_mask, u8 *mcs_rates)
{
- struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_cmd_set_mac_addr *cmd;
+ struct mwl8k_cmd_set_rate *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- if (priv->ap_fw) {
- cmd->mbss.mac_type = 0;
- memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN);
- } else {
- memcpy(cmd->mac_addr, mac, ETH_ALEN);
- }
+ legacy_rate_mask_to_array(cmd->legacy_rates, legacy_rate_mask);
+ memcpy(cmd->mcs_set, mcs_rates, 16);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2284,29 +2393,40 @@ static int mwl8k_set_mac_addr(struct ieee80211_hw *hw, u8 *mac)
return rc;
}
-
/*
- * CMD_SET_RATEADAPT_MODE.
+ * CMD_FINALIZE_JOIN.
*/
-struct mwl8k_cmd_set_rate_adapt_mode {
+#define MWL8K_FJ_BEACON_MAXLEN 128
+
+struct mwl8k_cmd_finalize_join {
struct mwl8k_cmd_pkt header;
- __le16 action;
- __le16 mode;
+ __le32 sleep_interval; /* Number of beacon periods to sleep */
+ __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
} __attribute__((packed));
-static int mwl8k_cmd_setrateadaptmode(struct ieee80211_hw *hw, __u16 mode)
+static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame,
+ int framelen, int dtim)
{
- struct mwl8k_cmd_set_rate_adapt_mode *cmd;
+ struct mwl8k_cmd_finalize_join *cmd;
+ struct ieee80211_mgmt *payload = frame;
+ int payload_len;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATEADAPT_MODE);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_FINALIZE_JOIN);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le16(MWL8K_CMD_SET);
- cmd->mode = cpu_to_le16(mode);
+ cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1);
+
+ payload_len = framelen - ieee80211_hdrlen(payload->frame_control);
+ if (payload_len < 0)
+ payload_len = 0;
+ else if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
+ payload_len = MWL8K_FJ_BEACON_MAXLEN;
+
+ memcpy(cmd->beacon_data, &payload->u.beacon, payload_len);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2315,59 +2435,57 @@ static int mwl8k_cmd_setrateadaptmode(struct ieee80211_hw *hw, __u16 mode)
}
/*
- * CMD_SET_WMM_MODE.
+ * CMD_SET_RTS_THRESHOLD.
*/
-struct mwl8k_cmd_set_wmm {
+struct mwl8k_cmd_set_rts_threshold {
struct mwl8k_cmd_pkt header;
__le16 action;
+ __le16 threshold;
} __attribute__((packed));
-static int mwl8k_set_wmm(struct ieee80211_hw *hw, bool enable)
+static int
+mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh)
{
- struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_cmd_set_wmm *cmd;
+ struct mwl8k_cmd_set_rts_threshold *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_WMM_MODE);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le16(!!enable);
+ cmd->action = cpu_to_le16(MWL8K_CMD_SET);
+ cmd->threshold = cpu_to_le16(rts_thresh);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
- if (!rc)
- priv->wmm_enabled = enable;
-
return rc;
}
/*
- * CMD_SET_RTS_THRESHOLD.
+ * CMD_SET_SLOT.
*/
-struct mwl8k_cmd_rts_threshold {
+struct mwl8k_cmd_set_slot {
struct mwl8k_cmd_pkt header;
__le16 action;
- __le16 threshold;
+ __u8 short_slot;
} __attribute__((packed));
-static int mwl8k_rts_threshold(struct ieee80211_hw *hw,
- u16 action, u16 threshold)
+static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
{
- struct mwl8k_cmd_rts_threshold *cmd;
+ struct mwl8k_cmd_set_slot *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_SLOT);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le16(action);
- cmd->threshold = cpu_to_le16(threshold);
+ cmd->action = cpu_to_le16(MWL8K_CMD_SET);
+ cmd->short_slot = short_slot_time;
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2426,9 +2544,9 @@ struct mwl8k_cmd_set_edca_params {
MWL8K_SET_EDCA_AIFS)
static int
-mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
- __u16 cw_min, __u16 cw_max,
- __u8 aifs, __u16 txop)
+mwl8k_cmd_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
+ __u16 cw_min, __u16 cw_max,
+ __u8 aifs, __u16 txop)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_cmd_set_edca_params *cmd;
@@ -2438,12 +2556,6 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
if (cmd == NULL)
return -ENOMEM;
- /*
- * Queues 0 (BE) and 1 (BK) are swapped in hardware for
- * this call.
- */
- qnum ^= !(qnum >> 1);
-
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL);
@@ -2467,170 +2579,259 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
}
/*
- * CMD_FINALIZE_JOIN.
+ * CMD_SET_WMM_MODE.
*/
-#define MWL8K_FJ_BEACON_MAXLEN 128
-
-struct mwl8k_cmd_finalize_join {
+struct mwl8k_cmd_set_wmm_mode {
struct mwl8k_cmd_pkt header;
- __le32 sleep_interval; /* Number of beacon periods to sleep */
- __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
+ __le16 action;
} __attribute__((packed));
-static int mwl8k_finalize_join(struct ieee80211_hw *hw, void *frame,
- int framelen, int dtim)
+static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable)
{
- struct mwl8k_cmd_finalize_join *cmd;
- struct ieee80211_mgmt *payload = frame;
- int payload_len;
+ struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_cmd_set_wmm_mode *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_FINALIZE_JOIN);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_WMM_MODE);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1);
-
- payload_len = framelen - ieee80211_hdrlen(payload->frame_control);
- if (payload_len < 0)
- payload_len = 0;
- else if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
- payload_len = MWL8K_FJ_BEACON_MAXLEN;
-
- memcpy(cmd->beacon_data, &payload->u.beacon, payload_len);
+ cmd->action = cpu_to_le16(!!enable);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
+ if (!rc)
+ priv->wmm_enabled = enable;
+
return rc;
}
/*
- * CMD_UPDATE_STADB.
+ * CMD_MIMO_CONFIG.
*/
-struct mwl8k_cmd_update_sta_db {
+struct mwl8k_cmd_mimo_config {
struct mwl8k_cmd_pkt header;
+ __le32 action;
+ __u8 rx_antenna_map;
+ __u8 tx_antenna_map;
+} __attribute__((packed));
- /* See STADB_ACTION_TYPE */
- __le32 action;
+static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
+{
+ struct mwl8k_cmd_mimo_config *cmd;
+ int rc;
- /* Peer MAC address */
- __u8 peer_addr[ETH_ALEN];
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
- __le32 reserved;
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_MIMO_CONFIG);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32((u32)MWL8K_CMD_SET);
+ cmd->rx_antenna_map = rx;
+ cmd->tx_antenna_map = tx;
- /* Peer info - valid during add/update. */
- struct peer_capability_info peer_info;
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
+ * CMD_USE_FIXED_RATE (STA version).
+ */
+struct mwl8k_cmd_use_fixed_rate_sta {
+ struct mwl8k_cmd_pkt header;
+ __le32 action;
+ __le32 allow_rate_drop;
+ __le32 num_rates;
+ struct {
+ __le32 is_ht_rate;
+ __le32 enable_retry;
+ __le32 rate;
+ __le32 retry_count;
+ } rate_entry[8];
+ __le32 rate_type;
+ __le32 reserved1;
+ __le32 reserved2;
} __attribute__((packed));
-static int mwl8k_cmd_update_sta_db(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, __u32 action)
+#define MWL8K_USE_AUTO_RATE 0x0002
+#define MWL8K_UCAST_RATE 0
+
+static int mwl8k_cmd_use_fixed_rate_sta(struct ieee80211_hw *hw)
{
- struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
- struct ieee80211_bss_conf *info = &mv_vif->bss_info;
- struct mwl8k_cmd_update_sta_db *cmd;
- struct peer_capability_info *peer_info;
+ struct mwl8k_cmd_use_fixed_rate_sta *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE);
+ cmd->rate_type = cpu_to_le32(MWL8K_UCAST_RATE);
- cmd->action = cpu_to_le32(action);
- peer_info = &cmd->peer_info;
- memcpy(cmd->peer_addr, mv_vif->bssid, ETH_ALEN);
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+ kfree(cmd);
- switch (action) {
- case MWL8K_STA_DB_ADD_ENTRY:
- case MWL8K_STA_DB_MODIFY_ENTRY:
- /* Build peer_info block */
- peer_info->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
- peer_info->basic_caps = cpu_to_le16(info->assoc_capability);
- memcpy(peer_info->legacy_rates, mwl8k_rateids,
- sizeof(mwl8k_rateids));
- peer_info->interop = 1;
- peer_info->amsdu_enabled = 0;
-
- rc = mwl8k_post_cmd(hw, &cmd->header);
- if (rc == 0)
- mv_vif->peer_id = peer_info->station_id;
+ return rc;
+}
- break;
+/*
+ * CMD_USE_FIXED_RATE (AP version).
+ */
+struct mwl8k_cmd_use_fixed_rate_ap {
+ struct mwl8k_cmd_pkt header;
+ __le32 action;
+ __le32 allow_rate_drop;
+ __le32 num_rates;
+ struct mwl8k_rate_entry_ap {
+ __le32 is_ht_rate;
+ __le32 enable_retry;
+ __le32 rate;
+ __le32 retry_count;
+ } rate_entry[4];
+ u8 multicast_rate;
+ u8 multicast_rate_type;
+ u8 management_rate;
+} __attribute__((packed));
- case MWL8K_STA_DB_DEL_ENTRY:
- case MWL8K_STA_DB_FLUSH:
- default:
- rc = mwl8k_post_cmd(hw, &cmd->header);
- if (rc == 0)
- mv_vif->peer_id = 0;
- break;
- }
+static int
+mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt)
+{
+ struct mwl8k_cmd_use_fixed_rate_ap *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE);
+ cmd->multicast_rate = mcast;
+ cmd->management_rate = mgmt;
+
+ rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
- * CMD_SET_AID.
+ * CMD_ENABLE_SNIFFER.
*/
-#define MWL8K_FRAME_PROT_DISABLED 0x00
-#define MWL8K_FRAME_PROT_11G 0x07
-#define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY 0x02
-#define MWL8K_FRAME_PROT_11N_HT_ALL 0x06
-
-struct mwl8k_cmd_update_set_aid {
- struct mwl8k_cmd_pkt header;
- __le16 aid;
-
- /* AP's MAC address (BSSID) */
- __u8 bssid[ETH_ALEN];
- __le16 protection_mode;
- __u8 supp_rates[14];
+struct mwl8k_cmd_enable_sniffer {
+ struct mwl8k_cmd_pkt header;
+ __le32 action;
} __attribute__((packed));
-static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable)
{
- struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
- struct ieee80211_bss_conf *info = &mv_vif->bss_info;
- struct mwl8k_cmd_update_set_aid *cmd;
- u16 prot_mode;
+ struct mwl8k_cmd_enable_sniffer *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_ENABLE_SNIFFER);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->aid = cpu_to_le16(info->aid);
+ cmd->action = cpu_to_le32(!!enable);
- memcpy(cmd->bssid, mv_vif->bssid, ETH_ALEN);
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+ kfree(cmd);
- if (info->use_cts_prot) {
- prot_mode = MWL8K_FRAME_PROT_11G;
+ return rc;
+}
+
+/*
+ * CMD_SET_MAC_ADDR.
+ */
+struct mwl8k_cmd_set_mac_addr {
+ struct mwl8k_cmd_pkt header;
+ union {
+ struct {
+ __le16 mac_type;
+ __u8 mac_addr[ETH_ALEN];
+ } mbss;
+ __u8 mac_addr[ETH_ALEN];
+ };
+} __attribute__((packed));
+
+#define MWL8K_MAC_TYPE_PRIMARY_CLIENT 0
+#define MWL8K_MAC_TYPE_SECONDARY_CLIENT 1
+#define MWL8K_MAC_TYPE_PRIMARY_AP 2
+#define MWL8K_MAC_TYPE_SECONDARY_AP 3
+
+static int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *mac)
+{
+ struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+ struct mwl8k_cmd_set_mac_addr *cmd;
+ int mac_type;
+ int rc;
+
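+ /* The interface holding the lowest supported macid is the
+ * primary client/AP; any other macid is a secondary one. */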
+ mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
+ if (vif != NULL && vif->type == NL80211_IFTYPE_STATION) {
+ if (mwl8k_vif->macid + 1 == ffs(priv->sta_macids_supported))
+ mac_type = MWL8K_MAC_TYPE_PRIMARY_CLIENT;
+ else
+ mac_type = MWL8K_MAC_TYPE_SECONDARY_CLIENT;
+ } else if (vif != NULL && vif->type == NL80211_IFTYPE_AP) {
+ if (mwl8k_vif->macid + 1 == ffs(priv->ap_macids_supported))
+ mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
+ else
+ mac_type = MWL8K_MAC_TYPE_SECONDARY_AP;
+ }
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ if (priv->ap_fw) {
+ cmd->mbss.mac_type = cpu_to_le16(mac_type);
+ memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN);
} else {
- switch (info->ht_operation_mode &
- IEEE80211_HT_OP_MODE_PROTECTION) {
- case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
- prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY;
- break;
- case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
- prot_mode = MWL8K_FRAME_PROT_11N_HT_ALL;
- break;
- default:
- prot_mode = MWL8K_FRAME_PROT_DISABLED;
- break;
- }
+ memcpy(cmd->mac_addr, mac, ETH_ALEN);
}
- cmd->protection_mode = cpu_to_le16(prot_mode);
- memcpy(cmd->supp_rates, mwl8k_rateids, sizeof(mwl8k_rateids));
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
+ * CMD_SET_RATEADAPT_MODE.
+ */
+struct mwl8k_cmd_set_rate_adapt_mode {
+ struct mwl8k_cmd_pkt header;
+ __le16 action;
+ __le16 mode;
+} __attribute__((packed));
+
+static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
+{
+ struct mwl8k_cmd_set_rate_adapt_mode *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATEADAPT_MODE);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le16(MWL8K_CMD_SET);
+ cmd->mode = cpu_to_le16(mode);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2639,115 +2840,255 @@ static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
}
/*
- * CMD_SET_RATE.
+ * CMD_BSS_START.
*/
-struct mwl8k_cmd_update_rateset {
- struct mwl8k_cmd_pkt header;
- __u8 legacy_rates[14];
-
- /* Bitmap for supported MCS codes. */
- __u8 mcs_set[16];
- __u8 reserved[16];
+struct mwl8k_cmd_bss_start {
+ struct mwl8k_cmd_pkt header;
+ __le32 enable;
} __attribute__((packed));
-static int mwl8k_update_rateset(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int enable)
{
- struct mwl8k_cmd_update_rateset *cmd;
+ struct mwl8k_cmd_bss_start *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_BSS_START);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- memcpy(cmd->legacy_rates, mwl8k_rateids, sizeof(mwl8k_rateids));
+ cmd->enable = cpu_to_le32(enable);
- rc = mwl8k_post_cmd(hw, &cmd->header);
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
kfree(cmd);
return rc;
}
/*
- * CMD_USE_FIXED_RATE.
+ * CMD_SET_NEW_STN.
*/
-#define MWL8K_RATE_TABLE_SIZE 8
-#define MWL8K_UCAST_RATE 0
-#define MWL8K_USE_AUTO_RATE 0x0002
+struct mwl8k_cmd_set_new_stn {
+ struct mwl8k_cmd_pkt header;
+ __le16 aid;
+ __u8 mac_addr[6];
+ __le16 stn_id;
+ __le16 action;
+ __le16 rsvd;
+ __le32 legacy_rates;
+ __u8 ht_rates[4];
+ __le16 cap_info;
+ __le16 ht_capabilities_info;
+ __u8 mac_ht_param_info;
+ __u8 rev;
+ __u8 control_channel;
+ __u8 add_channel;
+ __le16 op_mode;
+ __le16 stbc;
+ __u8 add_qos_info;
+ __u8 is_qos_sta;
+ __le32 fw_sta_ptr;
+} __attribute__((packed));
+
+#define MWL8K_STA_ACTION_ADD 0
+#define MWL8K_STA_ACTION_REMOVE 2
+
+static int mwl8k_cmd_set_new_stn_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mwl8k_cmd_set_new_stn *cmd;
+ u32 rates;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->aid = cpu_to_le16(sta->aid);
+ memcpy(cmd->mac_addr, sta->addr, ETH_ALEN);
+ cmd->stn_id = cpu_to_le16(sta->aid);
+ cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD);
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
+ rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
+ else
+ rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
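+ /* The firmware legacy rate bitmap is indexed by the 2.4 GHz
+ * rate table, in which the 5 GHz rates start at index 5. */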
+ cmd->legacy_rates = cpu_to_le32(rates);
+ if (sta->ht_cap.ht_supported) {
+ cmd->ht_rates[0] = sta->ht_cap.mcs.rx_mask[0];
+ cmd->ht_rates[1] = sta->ht_cap.mcs.rx_mask[1];
+ cmd->ht_rates[2] = sta->ht_cap.mcs.rx_mask[2];
+ cmd->ht_rates[3] = sta->ht_cap.mcs.rx_mask[3];
+ cmd->ht_capabilities_info = cpu_to_le16(sta->ht_cap.cap);
+ cmd->mac_ht_param_info = (sta->ht_cap.ampdu_factor & 3) |
+ ((sta->ht_cap.ampdu_density & 7) << 2);
+ cmd->is_qos_sta = 1;
+ }
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+static int mwl8k_cmd_set_new_stn_add_self(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mwl8k_cmd_set_new_stn *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ memcpy(cmd->mac_addr, vif->addr, ETH_ALEN);
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *addr)
+{
+ struct mwl8k_cmd_set_new_stn *cmd;
+ int rc;
-struct mwl8k_rate_entry {
- /* Set to 1 if HT rate, 0 if legacy. */
- __le32 is_ht_rate;
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
- /* Set to 1 to use retry_count field. */
- __le32 enable_retry;
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ memcpy(cmd->mac_addr, addr, ETH_ALEN);
+ cmd->action = cpu_to_le16(MWL8K_STA_ACTION_REMOVE);
- /* Specified legacy rate or MCS. */
- __le32 rate;
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
- /* Number of allowed retries. */
- __le32 retry_count;
+ return rc;
+}
+
+/*
+ * CMD_UPDATE_STADB.
+ */
+struct ewc_ht_info {
+ __le16 control1;
+ __le16 control2;
+ __le16 control3;
} __attribute__((packed));
-struct mwl8k_rate_table {
- /* 1 to allow specified rate and below */
- __le32 allow_rate_drop;
- __le32 num_rates;
- struct mwl8k_rate_entry rate_entry[MWL8K_RATE_TABLE_SIZE];
+struct peer_capability_info {
+ /* Peer type - AP vs. STA. */
+ __u8 peer_type;
+
+ /* Basic 802.11 capabilities from assoc resp. */
+ __le16 basic_caps;
+
+ /* Set if peer supports 802.11n high throughput (HT). */
+ __u8 ht_support;
+
+ /* Valid if HT is supported. */
+ __le16 ht_caps;
+ __u8 extended_ht_caps;
+ struct ewc_ht_info ewc_info;
+
+ /* Legacy rate table. Intersection of our rates and peer rates. */
+ __u8 legacy_rates[12];
+
+ /* HT rate table. Intersection of our rates and peer rates. */
+ __u8 ht_rates[16];
+ __u8 pad[16];
+
+ /* If set, interoperability mode, no proprietary extensions. */
+ __u8 interop;
+ __u8 pad2;
+ __u8 station_id;
+ __le16 amsdu_enabled;
} __attribute__((packed));
-struct mwl8k_cmd_use_fixed_rate {
- struct mwl8k_cmd_pkt header;
+struct mwl8k_cmd_update_stadb {
+ struct mwl8k_cmd_pkt header;
+
+ /* See STADB_ACTION_TYPE */
__le32 action;
- struct mwl8k_rate_table rate_table;
- /* Unicast, Broadcast or Multicast */
- __le32 rate_type;
- __le32 reserved1;
- __le32 reserved2;
+ /* Peer MAC address */
+ __u8 peer_addr[ETH_ALEN];
+
+ __le32 reserved;
+
+ /* Peer info - valid during add/update. */
+ struct peer_capability_info peer_info;
} __attribute__((packed));
-static int mwl8k_cmd_use_fixed_rate(struct ieee80211_hw *hw,
- u32 action, u32 rate_type, struct mwl8k_rate_table *rate_table)
+#define MWL8K_STA_DB_MODIFY_ENTRY 1
+#define MWL8K_STA_DB_DEL_ENTRY 2
+
+/* Peer Entry flags - used to define the type of the peer node */
+#define MWL8K_PEER_TYPE_ACCESSPOINT 2
+
+static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
- struct mwl8k_cmd_use_fixed_rate *cmd;
- int count;
+ struct mwl8k_cmd_update_stadb *cmd;
+ struct peer_capability_info *p;
+ u32 rates;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_STA_DB_MODIFY_ENTRY);
+ memcpy(cmd->peer_addr, sta->addr, ETH_ALEN);
+
+ p = &cmd->peer_info;
+ p->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
+ p->basic_caps = cpu_to_le16(vif->bss_conf.assoc_capability);
+ p->ht_support = sta->ht_cap.ht_supported;
+ p->ht_caps = sta->ht_cap.cap;
+ p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) |
+ ((sta->ht_cap.ampdu_density & 7) << 2);
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
+ rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
+ else
+ rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+ legacy_rate_mask_to_array(p->legacy_rates, rates);
+ memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16);
+ p->interop = 1;
+ p->amsdu_enabled = 0;
- cmd->action = cpu_to_le32(action);
- cmd->rate_type = cpu_to_le32(rate_type);
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+ kfree(cmd);
- if (rate_table != NULL) {
- /*
- * Copy over each field manually so that endian
- * conversion can be done.
- */
- cmd->rate_table.allow_rate_drop =
- cpu_to_le32(rate_table->allow_rate_drop);
- cmd->rate_table.num_rates =
- cpu_to_le32(rate_table->num_rates);
-
- for (count = 0; count < rate_table->num_rates; count++) {
- struct mwl8k_rate_entry *dst =
- &cmd->rate_table.rate_entry[count];
- struct mwl8k_rate_entry *src =
- &rate_table->rate_entry[count];
-
- dst->is_ht_rate = cpu_to_le32(src->is_ht_rate);
- dst->enable_retry = cpu_to_le32(src->enable_retry);
- dst->rate = cpu_to_le32(src->rate);
- dst->retry_count = cpu_to_le32(src->retry_count);
- }
- }
+ return rc ? rc : p->station_id;
+}
+
+static int mwl8k_cmd_update_stadb_del(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *addr)
+{
+ struct mwl8k_cmd_update_stadb *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_STA_DB_DEL_ENTRY);
+ memcpy(cmd->peer_addr, addr, ETH_ALEN);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2766,19 +3107,22 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
u32 status;
status = ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
- iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
-
if (!status)
return IRQ_NONE;
- if (status & MWL8K_A2H_INT_TX_DONE)
- tasklet_schedule(&priv->tx_reclaim_task);
+ if (status & MWL8K_A2H_INT_TX_DONE) {
+ status &= ~MWL8K_A2H_INT_TX_DONE;
+ tasklet_schedule(&priv->poll_tx_task);
+ }
if (status & MWL8K_A2H_INT_RX_READY) {
- while (rxq_process(hw, 0, 1))
- rxq_refill(hw, 0, 1);
+ status &= ~MWL8K_A2H_INT_RX_READY;
+ tasklet_schedule(&priv->poll_rx_task);
}
+ if (status)
+ iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
+
if (status & MWL8K_A2H_INT_OPC_DONE) {
if (priv->hostcmd_wait != NULL)
complete(priv->hostcmd_wait);
@@ -2793,6 +3137,53 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void mwl8k_tx_poll(unsigned long data)
+{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ struct mwl8k_priv *priv = hw->priv;
+ int limit;
+ int i;
+
+ limit = 32;
+
+ spin_lock_bh(&priv->tx_lock);
+
+ for (i = 0; i < MWL8K_TX_QUEUES; i++)
+ limit -= mwl8k_txq_reclaim(hw, i, limit, 0);
+
+ if (!priv->pending_tx_pkts && priv->tx_wait != NULL) {
+ complete(priv->tx_wait);
+ priv->tx_wait = NULL;
+ }
+
+ spin_unlock_bh(&priv->tx_lock);
+
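+ /* If the reclaim budget was not exhausted, ack TX_DONE by
+ * clearing its status bit; otherwise poll again. */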
+ if (limit) {
+ writel(~MWL8K_A2H_INT_TX_DONE,
+ priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
+ } else {
+ tasklet_schedule(&priv->poll_tx_task);
+ }
+}
+
+static void mwl8k_rx_poll(unsigned long data)
+{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ struct mwl8k_priv *priv = hw->priv;
+ int limit;
+
+ limit = 32;
+ limit -= rxq_process(hw, 0, limit);
+ limit -= rxq_refill(hw, 0, limit);
+
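+ /* Same budget scheme as TX: only clear the RX_READY status
+ * bit once a poll completes within the budget. */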
+ if (limit) {
+ writel(~MWL8K_A2H_INT_RX_READY,
+ priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
+ } else {
+ tasklet_schedule(&priv->poll_rx_task);
+ }
+}
+
/*
* Core driver operations.
@@ -2803,7 +3194,7 @@ static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
int index = skb_get_queue_mapping(skb);
int rc;
- if (priv->current_channel == NULL) {
+ if (!priv->radio_on) {
printk(KERN_DEBUG "%s: dropped TX frame since radio "
"disabled\n", wiphy_name(hw->wiphy));
dev_kfree_skb(skb);
@@ -2828,19 +3219,20 @@ static int mwl8k_start(struct ieee80211_hw *hw)
return -EIO;
}
- /* Enable tx reclaim tasklet */
- tasklet_enable(&priv->tx_reclaim_task);
+ /* Enable TX reclaim and RX tasklets. */
+ tasklet_enable(&priv->poll_tx_task);
+ tasklet_enable(&priv->poll_rx_task);
/* Enable interrupts */
iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
rc = mwl8k_fw_lock(hw);
if (!rc) {
- rc = mwl8k_cmd_802_11_radio_enable(hw);
+ rc = mwl8k_cmd_radio_enable(hw);
if (!priv->ap_fw) {
if (!rc)
- rc = mwl8k_enable_sniffer(hw, 0);
+ rc = mwl8k_cmd_enable_sniffer(hw, 0);
if (!rc)
rc = mwl8k_cmd_set_pre_scan(hw);
@@ -2851,10 +3243,10 @@ static int mwl8k_start(struct ieee80211_hw *hw)
}
if (!rc)
- rc = mwl8k_cmd_setrateadaptmode(hw, 0);
+ rc = mwl8k_cmd_set_rateadapt_mode(hw, 0);
if (!rc)
- rc = mwl8k_set_wmm(hw, 0);
+ rc = mwl8k_cmd_set_wmm_mode(hw, 0);
mwl8k_fw_unlock(hw);
}
@@ -2862,7 +3254,8 @@ static int mwl8k_start(struct ieee80211_hw *hw)
if (rc) {
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
free_irq(priv->pdev->irq, hw);
- tasklet_disable(&priv->tx_reclaim_task);
+ tasklet_disable(&priv->poll_tx_task);
+ tasklet_disable(&priv->poll_rx_task);
}
return rc;
@@ -2873,7 +3266,7 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
struct mwl8k_priv *priv = hw->priv;
int i;
- mwl8k_cmd_802_11_radio_disable(hw);
+ mwl8k_cmd_radio_disable(hw);
ieee80211_stop_queues(hw);
@@ -2886,36 +3279,27 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
if (priv->beacon_skb != NULL)
dev_kfree_skb(priv->beacon_skb);
- /* Stop tx reclaim tasklet */
- tasklet_disable(&priv->tx_reclaim_task);
+ /* Stop TX reclaim and RX tasklets. */
+ tasklet_disable(&priv->poll_tx_task);
+ tasklet_disable(&priv->poll_rx_task);
/* Return all skbs to mac80211 */
for (i = 0; i < MWL8K_TX_QUEUES; i++)
- mwl8k_txq_reclaim(hw, i, 1);
+ mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
}
static int mwl8k_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_vif *mwl8k_vif;
-
- /*
- * We only support one active interface at a time.
- */
- if (priv->vif != NULL)
- return -EBUSY;
-
- /*
- * We only support managed interfaces for now.
- */
- if (conf->type != NL80211_IFTYPE_STATION)
- return -EINVAL;
+ u32 macids_supported;
+ int macid;
/*
* Reject interface creation if sniffer mode is active, as
* STA operation is mutually exclusive with hardware sniffer
- * mode.
+ * mode. (Sniffer mode is only used on STA firmware.)
*/
if (priv->sniffer_enabled) {
printk(KERN_INFO "%s: unable to create STA "
@@ -2924,37 +3308,54 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
return -EINVAL;
}
- /* Clean out driver private area */
- mwl8k_vif = MWL8K_VIF(conf->vif);
- memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
- /* Set and save the mac address */
- mwl8k_set_mac_addr(hw, conf->mac_addr);
- memcpy(mwl8k_vif->mac_addr, conf->mac_addr, ETH_ALEN);
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ macids_supported = priv->ap_macids_supported;
+ break;
+ case NL80211_IFTYPE_STATION:
+ macids_supported = priv->sta_macids_supported;
+ break;
+ default:
+ return -EINVAL;
+ }
- /* Back pointer to parent config block */
- mwl8k_vif->priv = priv;
+ macid = ffs(macids_supported & ~priv->macids_used);
+ if (!macid--)
+ return -EBUSY;
- /* Set Initial sequence number to zero */
+ /* Setup driver private area. */
+ mwl8k_vif = MWL8K_VIF(vif);
+ memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
+ mwl8k_vif->vif = vif;
+ mwl8k_vif->macid = macid;
mwl8k_vif->seqno = 0;
- priv->vif = conf->vif;
- priv->current_channel = NULL;
+ /* Set the mac address. */
+ mwl8k_cmd_set_mac_addr(hw, vif, vif->addr);
+
+ if (priv->ap_fw)
+ mwl8k_cmd_set_new_stn_add_self(hw, vif);
+
+ priv->macids_used |= 1 << mwl8k_vif->macid;
+ list_add_tail(&mwl8k_vif->list, &priv->vif_list);
return 0;
}
static void mwl8k_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
- if (priv->vif == NULL)
- return;
+ if (priv->ap_fw)
+ mwl8k_cmd_set_new_stn_del(hw, vif, vif->addr);
- mwl8k_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00");
+ mwl8k_cmd_set_mac_addr(hw, vif, "\x00\x00\x00\x00\x00\x00");
- priv->vif = NULL;
+ priv->macids_used &= ~(1 << mwl8k_vif->macid);
+ list_del(&mwl8k_vif->list);
}
static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
@@ -2964,8 +3365,7 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
int rc;
if (conf->flags & IEEE80211_CONF_IDLE) {
- mwl8k_cmd_802_11_radio_disable(hw);
- priv->current_channel = NULL;
+ mwl8k_cmd_radio_disable(hw);
return 0;
}
@@ -2973,19 +3373,17 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
if (rc)
return rc;
- rc = mwl8k_cmd_802_11_radio_enable(hw);
+ rc = mwl8k_cmd_radio_enable(hw);
if (rc)
goto out;
- rc = mwl8k_cmd_set_rf_channel(hw, conf->channel);
+ rc = mwl8k_cmd_set_rf_channel(hw, conf);
if (rc)
goto out;
- priv->current_channel = conf->channel;
-
if (conf->power_level > 18)
conf->power_level = 18;
- rc = mwl8k_cmd_802_11_rf_tx_power(hw, conf->power_level);
+ rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
if (rc)
goto out;
@@ -3003,79 +3401,160 @@ out:
return rc;
}
-static void mwl8k_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info,
- u32 changed)
+static void
+mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
{
struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+ u32 ap_legacy_rates;
+ u8 ap_mcs_rates[16];
int rc;
- if ((changed & BSS_CHANGED_ASSOC) == 0)
+ if (mwl8k_fw_lock(hw))
return;
- priv->capture_beacon = false;
-
- rc = mwl8k_fw_lock(hw);
- if (rc)
- return;
+ /*
+ * No need to capture a beacon if we're no longer associated.
+ */
+ if ((changed & BSS_CHANGED_ASSOC) && !vif->bss_conf.assoc)
+ priv->capture_beacon = false;
- if (info->assoc) {
- memcpy(&mwl8k_vif->bss_info, info,
- sizeof(struct ieee80211_bss_conf));
+ /*
+ * Get the AP's legacy and MCS rates.
+ */
+ if (vif->bss_conf.assoc) {
+ struct ieee80211_sta *ap;
- memcpy(mwl8k_vif->bssid, info->bssid, ETH_ALEN);
+ rcu_read_lock();
- /* Install rates */
- rc = mwl8k_update_rateset(hw, vif);
- if (rc)
+ ap = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+ if (ap == NULL) {
+ rcu_read_unlock();
goto out;
+ }
+
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) {
+ ap_legacy_rates = ap->supp_rates[IEEE80211_BAND_2GHZ];
+ } else {
+ ap_legacy_rates =
+ ap->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+ }
+ memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
+
+ rcu_read_unlock();
+ }
- /* Turn on rate adaptation */
- rc = mwl8k_cmd_use_fixed_rate(hw, MWL8K_USE_AUTO_RATE,
- MWL8K_UCAST_RATE, NULL);
+ if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc) {
+ rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates);
if (rc)
goto out;
- /* Set radio preamble */
- rc = mwl8k_set_radio_preamble(hw, info->use_short_preamble);
+ rc = mwl8k_cmd_use_fixed_rate_sta(hw);
if (rc)
goto out;
+ }
- /* Set slot time */
- rc = mwl8k_cmd_set_slot(hw, info->use_short_slot);
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ rc = mwl8k_set_radio_preamble(hw,
+ vif->bss_conf.use_short_preamble);
if (rc)
goto out;
+ }
- /* Update peer rate info */
- rc = mwl8k_cmd_update_sta_db(hw, vif,
- MWL8K_STA_DB_MODIFY_ENTRY);
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ rc = mwl8k_cmd_set_slot(hw, vif->bss_conf.use_short_slot);
if (rc)
goto out;
+ }
- /* Set AID */
- rc = mwl8k_cmd_set_aid(hw, vif);
+ if (vif->bss_conf.assoc &&
+ (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_CTS_PROT |
+ BSS_CHANGED_HT))) {
+ rc = mwl8k_cmd_set_aid(hw, vif, ap_legacy_rates);
if (rc)
goto out;
+ }
+ if (vif->bss_conf.assoc &&
+ (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INT))) {
/*
* Finalize the join. Tell rx handler to process
* next beacon from our BSSID.
*/
- memcpy(priv->capture_bssid, mwl8k_vif->bssid, ETH_ALEN);
+ memcpy(priv->capture_bssid, vif->bss_conf.bssid, ETH_ALEN);
priv->capture_beacon = true;
- } else {
- rc = mwl8k_cmd_update_sta_db(hw, vif, MWL8K_STA_DB_DEL_ENTRY);
- memset(&mwl8k_vif->bss_info, 0,
- sizeof(struct ieee80211_bss_conf));
- memset(mwl8k_vif->bssid, 0, ETH_ALEN);
}
out:
mwl8k_fw_unlock(hw);
}
+static void
+mwl8k_bss_info_changed_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ int rc;
+
+ if (mwl8k_fw_lock(hw))
+ return;
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ rc = mwl8k_set_radio_preamble(hw,
+ vif->bss_conf.use_short_preamble);
+ if (rc)
+ goto out;
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ int idx;
+ int rate;
+
+ /*
+ * Use lowest supported basic rate for multicasts
+ * and management frames (such as probe responses --
+ * beacons will always go out at 1 Mb/s).
+ */
+ idx = ffs(vif->bss_conf.basic_rates);
+ if (idx)
+ idx--;
+
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
+ rate = mwl8k_rates_24[idx].hw_value;
+ else
+ rate = mwl8k_rates_50[idx].hw_value;
+
+ mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
+ }
+
+ if (changed & (BSS_CHANGED_BEACON_INT | BSS_CHANGED_BEACON)) {
+ struct sk_buff *skb;
+
+ skb = ieee80211_beacon_get(hw, vif);
+ if (skb != NULL) {
+ mwl8k_cmd_set_beacon(hw, vif, skb->data, skb->len);
+ kfree_skb(skb);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ mwl8k_cmd_bss_start(hw, vif, info->enable_beacon);
+
+out:
+ mwl8k_fw_unlock(hw);
+}
+
+static void
+mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ if (!priv->ap_fw)
+ mwl8k_bss_info_changed_sta(hw, vif, info, changed);
+ else
+ mwl8k_bss_info_changed_ap(hw, vif, info, changed);
+}
+
static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
int mc_count, struct dev_addr_list *mclist)
{
@@ -3105,7 +3584,7 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
* operation, so refuse to enable sniffer mode if a STA
* interface is active.
*/
- if (priv->vif != NULL) {
+ if (!list_empty(&priv->vif_list)) {
if (net_ratelimit())
printk(KERN_INFO "%s: not enabling sniffer "
"mode because STA interface is active\n",
@@ -3114,7 +3593,7 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
}
if (!priv->sniffer_enabled) {
- if (mwl8k_enable_sniffer(hw, 1))
+ if (mwl8k_cmd_enable_sniffer(hw, 1))
return 0;
priv->sniffer_enabled = true;
}
@@ -3126,6 +3605,14 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
return 1;
}
+static struct mwl8k_vif *mwl8k_first_vif(struct mwl8k_priv *priv)
+{
+ if (!list_empty(&priv->vif_list))
+ return list_entry(priv->vif_list.next, struct mwl8k_vif, list);
+
+ return NULL;
+}
+
static void mwl8k_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags,
@@ -3163,7 +3650,7 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
}
if (priv->sniffer_enabled) {
- mwl8k_enable_sniffer(hw, 0);
+ mwl8k_cmd_enable_sniffer(hw, 0);
priv->sniffer_enabled = false;
}
@@ -3174,7 +3661,8 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
*/
mwl8k_cmd_set_pre_scan(hw);
} else {
- u8 *bssid;
+ struct mwl8k_vif *mwl8k_vif;
+ const u8 *bssid;
/*
* Enable the BSS filter.
@@ -3184,9 +3672,11 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
* (where the OUI part needs to be nonzero for
* the BSSID to be accepted by POST_SCAN).
*/
- bssid = "\x01\x00\x00\x00\x00\x00";
- if (priv->vif != NULL)
- bssid = MWL8K_VIF(priv->vif)->bssid;
+ mwl8k_vif = mwl8k_first_vif(priv);
+ if (mwl8k_vif != NULL)
+ bssid = mwl8k_vif->vif->bss_conf.bssid;
+ else
+ bssid = "\x01\x00\x00\x00\x00\x00";
mwl8k_cmd_set_post_scan(hw, bssid);
}
@@ -3213,7 +3703,93 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
static int mwl8k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
- return mwl8k_rts_threshold(hw, MWL8K_CMD_SET, value);
+ return mwl8k_cmd_set_rts_threshold(hw, value);
+}
+
+struct mwl8k_sta_notify_item
+{
+ struct list_head list;
+ struct ieee80211_vif *vif;
+ enum sta_notify_cmd cmd;
+ struct ieee80211_sta sta;
+};
+
+static void
+mwl8k_do_sta_notify(struct ieee80211_hw *hw, struct mwl8k_sta_notify_item *s)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ /*
+ * STA firmware uses UPDATE_STADB, AP firmware uses SET_NEW_STN.
+ */
+ if (!priv->ap_fw && s->cmd == STA_NOTIFY_ADD) {
+ int rc;
+
+ rc = mwl8k_cmd_update_stadb_add(hw, s->vif, &s->sta);
+ if (rc >= 0) {
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(s->vif, s->sta.addr);
+ if (sta != NULL)
+ MWL8K_STA(sta)->peer_id = rc;
+ rcu_read_unlock();
+ }
+ } else if (!priv->ap_fw && s->cmd == STA_NOTIFY_REMOVE) {
+ mwl8k_cmd_update_stadb_del(hw, s->vif, s->sta.addr);
+ } else if (priv->ap_fw && s->cmd == STA_NOTIFY_ADD) {
+ mwl8k_cmd_set_new_stn_add(hw, s->vif, &s->sta);
+ } else if (priv->ap_fw && s->cmd == STA_NOTIFY_REMOVE) {
+ mwl8k_cmd_set_new_stn_del(hw, s->vif, s->sta.addr);
+ }
+}
+
+static void mwl8k_sta_notify_worker(struct work_struct *work)
+{
+ struct mwl8k_priv *priv =
+ container_of(work, struct mwl8k_priv, sta_notify_worker);
+ struct ieee80211_hw *hw = priv->hw;
+
+ spin_lock_bh(&priv->sta_notify_list_lock);
+ while (!list_empty(&priv->sta_notify_list)) {
+ struct mwl8k_sta_notify_item *s;
+
+ s = list_entry(priv->sta_notify_list.next,
+ struct mwl8k_sta_notify_item, list);
+ list_del(&s->list);
+
+ spin_unlock_bh(&priv->sta_notify_list_lock);
+
+ mwl8k_do_sta_notify(hw, s);
+ kfree(s);
+
+ spin_lock_bh(&priv->sta_notify_list_lock);
+ }
+ spin_unlock_bh(&priv->sta_notify_list_lock);
+}
+
+static void
+mwl8k_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+{
+ struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_sta_notify_item *s;
+
+ if (cmd != STA_NOTIFY_ADD && cmd != STA_NOTIFY_REMOVE)
+ return;
+
+ s = kmalloc(sizeof(*s), GFP_ATOMIC);
+ if (s != NULL) {
+ s->vif = vif;
+ s->cmd = cmd;
+ s->sta = *sta;
+
+ spin_lock(&priv->sta_notify_list_lock);
+ list_add_tail(&s->list, &priv->sta_notify_list);
+ spin_unlock(&priv->sta_notify_list_lock);
+
+ ieee80211_queue_work(hw, &priv->sta_notify_worker);
+ }
}
static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -3225,14 +3801,14 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
rc = mwl8k_fw_lock(hw);
if (!rc) {
if (!priv->wmm_enabled)
- rc = mwl8k_set_wmm(hw, 1);
+ rc = mwl8k_cmd_set_wmm_mode(hw, 1);
if (!rc)
- rc = mwl8k_set_edca_params(hw, queue,
- params->cw_min,
- params->cw_max,
- params->aifs,
- params->txop);
+ rc = mwl8k_cmd_set_edca_params(hw, queue,
+ params->cw_min,
+ params->cw_max,
+ params->aifs,
+ params->txop);
mwl8k_fw_unlock(hw);
}
@@ -3240,28 +3816,26 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
return rc;
}
-static int mwl8k_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
+static int mwl8k_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
{
- struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_tx_queue *txq;
- int index;
-
- spin_lock_bh(&priv->tx_lock);
- for (index = 0; index < MWL8K_TX_QUEUES; index++) {
- txq = priv->txq + index;
- memcpy(&stats[index], &txq->stats,
- sizeof(struct ieee80211_tx_queue_stats));
- }
- spin_unlock_bh(&priv->tx_lock);
-
- return 0;
+ return mwl8k_cmd_get_stat(hw, stats);
}
-static int mwl8k_get_stats(struct ieee80211_hw *hw,
- struct ieee80211_low_level_stats *stats)
+static int
+mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
- return mwl8k_cmd_802_11_get_stat(hw, stats);
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
+ return -ENOTSUPP;
+ return 0;
+ default:
+ return -ENOTSUPP;
+ }
}
static const struct ieee80211_ops mwl8k_ops = {
@@ -3275,67 +3849,71 @@ static const struct ieee80211_ops mwl8k_ops = {
.prepare_multicast = mwl8k_prepare_multicast,
.configure_filter = mwl8k_configure_filter,
.set_rts_threshold = mwl8k_set_rts_threshold,
+ .sta_notify = mwl8k_sta_notify,
.conf_tx = mwl8k_conf_tx,
- .get_tx_stats = mwl8k_get_tx_stats,
.get_stats = mwl8k_get_stats,
+ .ampdu_action = mwl8k_ampdu_action,
};
-static void mwl8k_tx_reclaim_handler(unsigned long data)
-{
- int i;
- struct ieee80211_hw *hw = (struct ieee80211_hw *) data;
- struct mwl8k_priv *priv = hw->priv;
-
- spin_lock_bh(&priv->tx_lock);
- for (i = 0; i < MWL8K_TX_QUEUES; i++)
- mwl8k_txq_reclaim(hw, i, 0);
-
- if (priv->tx_wait != NULL && !priv->pending_tx_pkts) {
- complete(priv->tx_wait);
- priv->tx_wait = NULL;
- }
- spin_unlock_bh(&priv->tx_lock);
-}
-
static void mwl8k_finalize_join_worker(struct work_struct *work)
{
struct mwl8k_priv *priv =
container_of(work, struct mwl8k_priv, finalize_join_worker);
struct sk_buff *skb = priv->beacon_skb;
- u8 dtim = MWL8K_VIF(priv->vif)->bss_info.dtim_period;
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ int len = skb->len - offsetof(struct ieee80211_mgmt, u.beacon.variable);
+ const u8 *tim = cfg80211_find_ie(WLAN_EID_TIM,
+ mgmt->u.beacon.variable, len);
+ int dtim_period = 1;
- mwl8k_finalize_join(priv->hw, skb->data, skb->len, dtim);
- dev_kfree_skb(skb);
+ if (tim && tim[1] >= 2)
+ dtim_period = tim[3];
+ mwl8k_cmd_finalize_join(priv->hw, skb->data, skb->len, dtim_period);
+
+ dev_kfree_skb(skb);
priv->beacon_skb = NULL;
}
enum {
- MWL8687 = 0,
+ MWL8363 = 0,
+ MWL8687,
MWL8366,
};
static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = {
- {
+ [MWL8363] = {
+ .part_name = "88w8363",
+ .helper_image = "mwl8k/helper_8363.fw",
+ .fw_image = "mwl8k/fmimage_8363.fw",
+ },
+ [MWL8687] = {
.part_name = "88w8687",
.helper_image = "mwl8k/helper_8687.fw",
.fw_image = "mwl8k/fmimage_8687.fw",
- .rxd_ops = &rxd_8687_ops,
- .modes = BIT(NL80211_IFTYPE_STATION),
},
- {
+ [MWL8366] = {
.part_name = "88w8366",
.helper_image = "mwl8k/helper_8366.fw",
.fw_image = "mwl8k/fmimage_8366.fw",
- .rxd_ops = &rxd_8366_ops,
- .modes = 0,
+ .ap_rxd_ops = &rxd_8366_ap_ops,
},
};
+MODULE_FIRMWARE("mwl8k/helper_8363.fw");
+MODULE_FIRMWARE("mwl8k/fmimage_8363.fw");
+MODULE_FIRMWARE("mwl8k/helper_8687.fw");
+MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
+MODULE_FIRMWARE("mwl8k/helper_8366.fw");
+MODULE_FIRMWARE("mwl8k/fmimage_8366.fw");
+
static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
+ { PCI_VDEVICE(MARVELL, 0x2a0c), .driver_data = MWL8363, },
+ { PCI_VDEVICE(MARVELL, 0x2a24), .driver_data = MWL8363, },
{ PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, },
{ PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, },
{ PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, },
+ { PCI_VDEVICE(MARVELL, 0x2a43), .driver_data = MWL8366, },
{ },
};
MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table);
@@ -3354,6 +3932,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
printed_version = 1;
}
+
rc = pci_enable_device(pdev);
if (rc) {
printk(KERN_ERR "%s: Cannot enable new PCI device\n",
@@ -3370,6 +3949,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
pci_set_master(pdev);
+
hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops);
if (hw == NULL) {
printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME);
@@ -3377,17 +3957,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
goto err_free_reg;
}
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+ pci_set_drvdata(pdev, hw);
+
priv = hw->priv;
priv->hw = hw;
priv->pdev = pdev;
priv->device_info = &mwl8k_info_tbl[id->driver_data];
- priv->rxd_ops = priv->device_info->rxd_ops;
- priv->sniffer_enabled = false;
- priv->wmm_enabled = false;
- priv->pending_tx_pkts = 0;
- SET_IEEE80211_DEV(hw, &pdev->dev);
- pci_set_drvdata(pdev, hw);
priv->sram = pci_iomap(pdev, 0, 0x10000);
if (priv->sram == NULL) {
@@ -3410,16 +3987,46 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
}
}
- memcpy(priv->channels, mwl8k_channels, sizeof(mwl8k_channels));
- priv->band.band = IEEE80211_BAND_2GHZ;
- priv->band.channels = priv->channels;
- priv->band.n_channels = ARRAY_SIZE(mwl8k_channels);
- priv->band.bitrates = priv->rates;
- priv->band.n_bitrates = ARRAY_SIZE(mwl8k_rates);
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
- BUILD_BUG_ON(sizeof(priv->rates) != sizeof(mwl8k_rates));
- memcpy(priv->rates, mwl8k_rates, sizeof(mwl8k_rates));
+ /* Reset firmware and hardware */
+ mwl8k_hw_reset(priv);
+
+ /* Ask userland hotplug daemon for the device firmware */
+ rc = mwl8k_request_firmware(priv);
+ if (rc) {
+ printk(KERN_ERR "%s: Firmware files not found\n",
+ wiphy_name(hw->wiphy));
+ goto err_stop_firmware;
+ }
+
+ /* Load firmware into hardware */
+ rc = mwl8k_load_firmware(hw);
+ if (rc) {
+ printk(KERN_ERR "%s: Cannot start firmware\n",
+ wiphy_name(hw->wiphy));
+ goto err_stop_firmware;
+ }
+
+ /* Reclaim memory once firmware is successfully loaded */
+ mwl8k_release_firmware(priv);
+
+
+ if (priv->ap_fw) {
+ priv->rxd_ops = priv->device_info->ap_rxd_ops;
+ if (priv->rxd_ops == NULL) {
+ printk(KERN_ERR "%s: Driver does not have AP "
+ "firmware image support for this hardware\n",
+ wiphy_name(hw->wiphy));
+ goto err_stop_firmware;
+ }
+ } else {
+ priv->rxd_ops = &rxd_sta_ops;
+ }
+
+ priv->sniffer_enabled = false;
+ priv->wmm_enabled = false;
+ priv->pending_tx_pkts = 0;
+
/*
* Extra headroom is the size of the required DMA header
@@ -3432,33 +4039,40 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
hw->queues = MWL8K_TX_QUEUES;
- hw->wiphy->interface_modes = priv->device_info->modes;
-
/* Set rssi and noise values to dBm */
hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM;
hw->vif_data_size = sizeof(struct mwl8k_vif);
- priv->vif = NULL;
+ hw->sta_data_size = sizeof(struct mwl8k_sta);
+
+ priv->macids_used = 0;
+ INIT_LIST_HEAD(&priv->vif_list);
/* Set default radio state and preamble */
priv->radio_on = 0;
priv->radio_short_preamble = 0;
+ /* Station database handling */
+ INIT_WORK(&priv->sta_notify_worker, mwl8k_sta_notify_worker);
+ spin_lock_init(&priv->sta_notify_list_lock);
+ INIT_LIST_HEAD(&priv->sta_notify_list);
+
/* Finalize join worker */
INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
- /* TX reclaim tasklet */
- tasklet_init(&priv->tx_reclaim_task,
- mwl8k_tx_reclaim_handler, (unsigned long)hw);
- tasklet_disable(&priv->tx_reclaim_task);
+ /* TX reclaim and RX tasklets. */
+ tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
+ tasklet_disable(&priv->poll_tx_task);
+ tasklet_init(&priv->poll_rx_task, mwl8k_rx_poll, (unsigned long)hw);
+ tasklet_disable(&priv->poll_rx_task);
/* Power management cookie */
priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma);
if (priv->cookie == NULL)
- goto err_iounmap;
+ goto err_stop_firmware;
rc = mwl8k_rxq_init(hw, 0);
if (rc)
- goto err_iounmap;
+ goto err_free_cookie;
rxq_refill(hw, 0, INT_MAX);
mutex_init(&priv->fw_mutex);
@@ -3478,7 +4092,8 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
- iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
+ iowrite32(MWL8K_A2H_INT_TX_DONE | MWL8K_A2H_INT_RX_READY,
+ priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
iowrite32(0xffffffff, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
@@ -3489,31 +4104,9 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
goto err_free_queues;
}
- /* Reset firmware and hardware */
- mwl8k_hw_reset(priv);
-
- /* Ask userland hotplug daemon for the device firmware */
- rc = mwl8k_request_firmware(priv);
- if (rc) {
- printk(KERN_ERR "%s: Firmware files not found\n",
- wiphy_name(hw->wiphy));
- goto err_free_irq;
- }
-
- /* Load firmware into hardware */
- rc = mwl8k_load_firmware(hw);
- if (rc) {
- printk(KERN_ERR "%s: Cannot start firmware\n",
- wiphy_name(hw->wiphy));
- goto err_stop_firmware;
- }
-
- /* Reclaim memory once firmware is successfully loaded */
- mwl8k_release_firmware(priv);
-
/*
* Temporarily enable interrupts. Initial firmware host
- * commands use interrupts and avoids polling. Disable
+ * commands use interrupts and avoid polling. Disable
* interrupts when done.
*/
iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
@@ -3529,22 +4122,29 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
if (rc) {
printk(KERN_ERR "%s: Cannot initialise firmware\n",
wiphy_name(hw->wiphy));
- goto err_stop_firmware;
+ goto err_free_irq;
}
+ hw->wiphy->interface_modes = 0;
+ if (priv->ap_macids_supported)
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
+ if (priv->sta_macids_supported)
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
+
+
/* Turn radio off */
- rc = mwl8k_cmd_802_11_radio_disable(hw);
+ rc = mwl8k_cmd_radio_disable(hw);
if (rc) {
printk(KERN_ERR "%s: Cannot disable\n", wiphy_name(hw->wiphy));
- goto err_stop_firmware;
+ goto err_free_irq;
}
/* Clear MAC address */
- rc = mwl8k_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00");
+ rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00");
if (rc) {
printk(KERN_ERR "%s: Cannot clear MAC address\n",
wiphy_name(hw->wiphy));
- goto err_stop_firmware;
+ goto err_free_irq;
}
/* Disable interrupts */
@@ -3555,7 +4155,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
if (rc) {
printk(KERN_ERR "%s: Cannot register device\n",
wiphy_name(hw->wiphy));
- goto err_stop_firmware;
+ goto err_free_queues;
}
printk(KERN_INFO "%s: %s v%d, %pM, %s firmware %u.%u.%u.%u\n",
@@ -3567,10 +4167,6 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
return 0;
-err_stop_firmware:
- mwl8k_hw_reset(priv);
- mwl8k_release_firmware(priv);
-
err_free_irq:
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
free_irq(priv->pdev->irq, hw);
@@ -3580,11 +4176,16 @@ err_free_queues:
mwl8k_txq_deinit(hw, i);
mwl8k_rxq_deinit(hw, 0);
-err_iounmap:
+err_free_cookie:
if (priv->cookie != NULL)
pci_free_consistent(priv->pdev, 4,
priv->cookie, priv->cookie_dma);
+err_stop_firmware:
+ mwl8k_hw_reset(priv);
+ mwl8k_release_firmware(priv);
+
+err_iounmap:
if (priv->regs != NULL)
pci_iounmap(pdev, priv->regs);
@@ -3622,15 +4223,16 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
ieee80211_unregister_hw(hw);
- /* Remove tx reclaim tasklet */
- tasklet_kill(&priv->tx_reclaim_task);
+ /* Remove TX reclaim and RX tasklets. */
+ tasklet_kill(&priv->poll_tx_task);
+ tasklet_kill(&priv->poll_rx_task);
/* Stop hardware */
mwl8k_hw_reset(priv);
/* Return all skbs to mac80211 */
for (i = 0; i < MWL8K_TX_QUEUES; i++)
- mwl8k_txq_reclaim(hw, i, 1);
+ mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
for (i = 0; i < MWL8K_TX_QUEUES; i++)
mwl8k_txq_deinit(hw, i);
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 753a1804eee7..a9e9cea2d767 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -1668,12 +1668,12 @@ __orinoco_set_multicast_list(struct net_device *dev)
/* The Hermes doesn't seem to have an allmulti mode, so we go
* into promiscuous mode and let the upper levels deal. */
if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > MAX_MULTICAST(priv))) {
+ (netdev_mc_count(dev) > MAX_MULTICAST(priv))) {
promisc = 1;
mc_count = 0;
} else {
promisc = 0;
- mc_count = dev->mc_count;
+ mc_count = netdev_mc_count(dev);
}
err = __orinoco_hw_set_multicast_list(priv, dev->mc_list, mc_count,
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index c13a4c383410..075f446b3139 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -274,7 +274,7 @@ static void __devexit orinoco_nortel_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id orinoco_nortel_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(orinoco_nortel_id_table) = {
/* Nortel emobility PCI */
{0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,},
/* Symbol LA-4123 PCI */
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index fea7781948e7..bda5317cc596 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -212,7 +212,7 @@ static void __devexit orinoco_pci_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id orinoco_pci_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(orinoco_pci_id_table) = {
/* Intersil Prism 3 */
{0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID,},
/* Intersil Prism 2.5 */
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index 3f2942a1e4f5..e0d5874ab42f 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -310,7 +310,7 @@ static void __devexit orinoco_plx_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id orinoco_plx_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(orinoco_plx_id_table) = {
{0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens SpeedStream SS1023 */
{0x1385, 0x4100, PCI_ANY_ID, PCI_ANY_ID,}, /* Netgear MA301 */
{0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index d3452548cc71..88cbc7902aa0 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -203,7 +203,7 @@ static void __devexit orinoco_tmd_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id orinoco_tmd_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(orinoco_tmd_id_table) = {
{0x15e8, 0x0131, PCI_ANY_ID, PCI_ANY_ID,}, /* NDC and OEMs, e.g. pheecom */
{0,},
};
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 18012dbfb45d..3fe6366e567c 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -216,7 +216,7 @@ static void p54_stop(struct ieee80211_hw *dev)
}
static int p54_add_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct p54_common *priv = dev->priv;
@@ -226,28 +226,28 @@ static int p54_add_interface(struct ieee80211_hw *dev,
return -EOPNOTSUPP;
}
- priv->vif = conf->vif;
+ priv->vif = vif;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
- priv->mode = conf->type;
+ priv->mode = vif->type;
break;
default:
mutex_unlock(&priv->conf_mutex);
return -EOPNOTSUPP;
}
- memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
+ memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
p54_setup_mac(priv);
mutex_unlock(&priv->conf_mutex);
return 0;
}
static void p54_remove_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct p54_common *priv = dev->priv;
@@ -358,16 +358,6 @@ static int p54_get_stats(struct ieee80211_hw *dev,
return 0;
}
-static int p54_get_tx_stats(struct ieee80211_hw *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct p54_common *priv = dev->priv;
-
- memcpy(stats, &priv->tx_stats[P54_QUEUE_DATA],
- sizeof(stats[0]) * dev->queues);
- return 0;
-}
-
static void p54_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -522,7 +512,6 @@ static const struct ieee80211_ops p54_ops = {
.configure_filter = p54_configure_filter,
.conf_tx = p54_conf_tx,
.get_stats = p54_get_stats,
- .get_tx_stats = p54_get_tx_stats
};
struct ieee80211_hw *p54_init_common(size_t priv_data_len)
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index 1afc39410e85..43a3b2ead81a 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -157,6 +157,12 @@ struct p54_led_dev {
#endif /* CONFIG_P54_LEDS */
+struct p54_tx_queue_stats {
+ unsigned int len;
+ unsigned int limit;
+ unsigned int count;
+};
+
struct p54_common {
struct ieee80211_hw *hw;
struct ieee80211_vif *vif;
@@ -183,7 +189,7 @@ struct p54_common {
/* (e)DCF / QOS state */
bool use_short_slot;
spinlock_t tx_stats_lock;
- struct ieee80211_tx_queue_stats tx_stats[8];
+ struct p54_tx_queue_stats tx_stats[8];
struct p54_edcf_queue_param qos_params[8];
/* Radio data */
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index a72f7c2577de..ed4bdffdd63e 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -31,7 +31,7 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("prism54pci");
MODULE_FIRMWARE("isl3886pci");
-static struct pci_device_id p54p_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(p54p_table) = {
/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
{ PCI_DEVICE(0x1260, 0x3890) },
/* 3COM 3CRWE154G72 Wireless LAN adapter */
@@ -157,6 +157,14 @@ static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
skb_tail_pointer(skb),
priv->common.rx_mtu + 32,
PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ dev_kfree_skb_any(skb);
+ dev_err(&priv->pdev->dev,
+ "RX DMA Mapping error\n");
+ break;
+ }
+
desc->host_addr = cpu_to_le32(mapping);
desc->device_addr = 0; // FIXME: necessary?
desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
@@ -226,14 +234,14 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf);
}
-/* caller must hold priv->lock */
static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
int ring_index, struct p54p_desc *ring, u32 ring_limit,
- void **tx_buf)
+ struct sk_buff **tx_buf)
{
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
struct p54p_desc *desc;
+ struct sk_buff *skb;
u32 idx, i;
i = (*index) % ring_limit;
@@ -242,9 +250,8 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
while (i != idx) {
desc = &ring[i];
- if (tx_buf[i])
- if (FREE_AFTER_TX((struct sk_buff *) tx_buf[i]))
- p54_free_skb(dev, tx_buf[i]);
+
+ skb = tx_buf[i];
tx_buf[i] = NULL;
pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr),
@@ -255,17 +262,28 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
desc->len = 0;
desc->flags = 0;
+ if (skb && FREE_AFTER_TX(skb))
+ p54_free_skb(dev, skb);
+
i++;
i %= ring_limit;
}
}
-static void p54p_rx_tasklet(unsigned long dev_id)
+static void p54p_tasklet(unsigned long dev_id)
{
struct ieee80211_hw *dev = (struct ieee80211_hw *)dev_id;
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
+ p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt,
+ ARRAY_SIZE(ring_control->tx_mgmt),
+ priv->tx_buf_mgmt);
+
+ p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data,
+ ARRAY_SIZE(ring_control->tx_data),
+ priv->tx_buf_data);
+
p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt,
ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt);
@@ -280,59 +298,49 @@ static irqreturn_t p54p_interrupt(int irq, void *dev_id)
{
struct ieee80211_hw *dev = dev_id;
struct p54p_priv *priv = dev->priv;
- struct p54p_ring_control *ring_control = priv->ring_control;
__le32 reg;
- spin_lock(&priv->lock);
reg = P54P_READ(int_ident);
if (unlikely(reg == cpu_to_le32(0xFFFFFFFF))) {
- spin_unlock(&priv->lock);
- return IRQ_HANDLED;
+ goto out;
}
-
P54P_WRITE(int_ack, reg);
reg &= P54P_READ(int_enable);
- if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)) {
- p54p_check_tx_ring(dev, &priv->tx_idx_mgmt,
- 3, ring_control->tx_mgmt,
- ARRAY_SIZE(ring_control->tx_mgmt),
- priv->tx_buf_mgmt);
-
- p54p_check_tx_ring(dev, &priv->tx_idx_data,
- 1, ring_control->tx_data,
- ARRAY_SIZE(ring_control->tx_data),
- priv->tx_buf_data);
-
- tasklet_schedule(&priv->rx_tasklet);
-
- } else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
+ if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE))
+ tasklet_schedule(&priv->tasklet);
+ else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
complete(&priv->boot_comp);
- spin_unlock(&priv->lock);
-
+out:
return reg ? IRQ_HANDLED : IRQ_NONE;
}
static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
+ unsigned long flags;
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
- unsigned long flags;
struct p54p_desc *desc;
dma_addr_t mapping;
u32 device_idx, idx, i;
spin_lock_irqsave(&priv->lock, flags);
-
device_idx = le32_to_cpu(ring_control->device_idx[1]);
idx = le32_to_cpu(ring_control->host_idx[1]);
i = idx % ARRAY_SIZE(ring_control->tx_data);
- priv->tx_buf_data[i] = skb;
mapping = pci_map_single(priv->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ p54_free_skb(dev, skb);
+ dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
+ return;
+ }
+ priv->tx_buf_data[i] = skb;
+
desc = &ring_control->tx_data[i];
desc->host_addr = cpu_to_le32(mapping);
desc->device_addr = ((struct p54_hdr *)skb->data)->req_id;
@@ -354,14 +362,14 @@ static void p54p_stop(struct ieee80211_hw *dev)
unsigned int i;
struct p54p_desc *desc;
- tasklet_kill(&priv->rx_tasklet);
-
P54P_WRITE(int_enable, cpu_to_le32(0));
P54P_READ(int_enable);
udelay(10);
free_irq(priv->pdev->irq, dev);
+ tasklet_kill(&priv->tasklet);
+
P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) {
@@ -545,7 +553,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
priv->common.tx = p54p_tx;
spin_lock_init(&priv->lock);
- tasklet_init(&priv->rx_tasklet, p54p_rx_tasklet, (unsigned long)dev);
+ tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);
err = request_firmware(&priv->firmware, "isl3886pci",
&priv->pdev->dev);
diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h
index fbb683953fb2..2feead617a3b 100644
--- a/drivers/net/wireless/p54/p54pci.h
+++ b/drivers/net/wireless/p54/p54pci.h
@@ -92,7 +92,7 @@ struct p54p_priv {
struct p54_common common;
struct pci_dev *pdev;
struct p54p_csr __iomem *map;
- struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tasklet;
const struct firmware *firmware;
spinlock_t lock;
struct p54p_ring_control *ring_control;
@@ -101,8 +101,8 @@ struct p54p_priv {
u32 rx_idx_mgmt, tx_idx_mgmt;
struct sk_buff *rx_buf_data[8];
struct sk_buff *rx_buf_mgmt[4];
- void *tx_buf_data[32];
- void *tx_buf_mgmt[4];
+ struct sk_buff *tx_buf_data[32];
+ struct sk_buff *tx_buf_mgmt[4];
struct completion boot_comp;
};
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index b6dda2b27fb5..0e8f69461ffe 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -183,7 +183,7 @@ static int p54_tx_qos_accounting_alloc(struct p54_common *priv,
struct sk_buff *skb,
const u16 p54_queue)
{
- struct ieee80211_tx_queue_stats *queue;
+ struct p54_tx_queue_stats *queue;
unsigned long flags;
if (WARN_ON(p54_queue > P54_QUEUE_NUM))
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index e4f2bb7368f2..dc14420a9adc 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -39,7 +39,7 @@ module_param(init_pcitm, int, 0);
* driver_data
* If you have an update for this please contact prism54-devel@prism54.org
* The latest list can be found at http://prism54.org/supported_cards.php */
-static const struct pci_device_id prism54_id_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(prism54_id_tbl) = {
/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
{
0x1260, 0x3890,
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 88e1e4e32b22..85905cab4f16 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -1950,7 +1950,7 @@ static void set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI)
ray_update_multi_list(dev, 1);
else {
- if (local->num_multi != dev->mc_count)
+ if (local->num_multi != netdev_mc_count(dev))
ray_update_multi_list(dev, 0);
}
} /* end set_multicast_list */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 2ecbedb26e15..14692bc51b51 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1492,10 +1492,10 @@ static void set_multicast_list(struct usbnet *usbdev)
filter |= RNDIS_PACKET_TYPE_PROMISCUOUS |
RNDIS_PACKET_TYPE_ALL_LOCAL;
} else if (usbdev->net->flags & IFF_ALLMULTI ||
- usbdev->net->mc_count > priv->multicast_size) {
+ netdev_mc_count(usbdev->net) > priv->multicast_size) {
filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST;
- } else if (usbdev->net->mc_count > 0) {
- size = min(priv->multicast_size, usbdev->net->mc_count);
+ } else if (!netdev_mc_empty(usbdev->net)) {
+ size = min(priv->multicast_size, netdev_mc_count(usbdev->net));
buf = kmalloc(size * ETH_ALEN, GFP_KERNEL);
if (!buf) {
devwarn(usbdev,
@@ -2594,23 +2594,9 @@ end:
/*
* driver/device initialization
*/
-static int bcm4320a_early_init(struct usbnet *usbdev)
-{
- /* bcm4320a doesn't handle configuration parameters well. Try
- * set any and you get partially zeroed mac and broken device.
- */
-
- return 0;
-}
-
-static int bcm4320b_early_init(struct usbnet *usbdev)
+static void rndis_copy_module_params(struct usbnet *usbdev)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- char buf[8];
-
- /* Early initialization settings, setting these won't have effect
- * if called after generic_rndis_bind().
- */
priv->param_country[0] = modparam_country[0];
priv->param_country[1] = modparam_country[1];
@@ -2652,6 +2638,32 @@ static int bcm4320b_early_init(struct usbnet *usbdev)
priv->param_workaround_interval = 500;
else
priv->param_workaround_interval = modparam_workaround_interval;
+}
+
+static int bcm4320a_early_init(struct usbnet *usbdev)
+{
+ /* Copy module parameters for bcm4320a so that iwconfig reports txpower
+ * and the workaround interval is stored in the private structure.
+ */
+ rndis_copy_module_params(usbdev);
+
+ /* bcm4320a doesn't handle configuration parameters well. Trying to
+ * set any yields a partially zeroed MAC and a broken device.
+ */
+
+ return 0;
+}
+
+static int bcm4320b_early_init(struct usbnet *usbdev)
+{
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ char buf[8];
+
+ rndis_copy_module_params(usbdev);
+
+ /* Early initialization settings, setting these won't have effect
+ * if called after generic_rndis_bind().
+ */
rndis_set_config_parameter_str(usbdev, "Country", priv->param_country);
rndis_set_config_parameter_str(usbdev, "FrameBursting",
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index bf60689aaabb..3ca824a91ad9 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -54,12 +54,12 @@ config RT61PCI
When compiled as a module, this driver will be called rt61pci.
config RT2800PCI_PCI
- tristate
+ boolean
depends on PCI
default y
config RT2800PCI_SOC
- tristate
+ boolean
depends on RALINK_RT288X || RALINK_RT305X
default y
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index e7f46405a418..108982762d45 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -451,7 +451,7 @@ static void rt2400pci_config_channel(struct rt2x00_dev *rt2x00dev,
/*
* RF2420 chipset don't need any additional actions.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2420))
+ if (rt2x00_rf(rt2x00dev, RF2420))
return;
/*
@@ -1343,8 +1343,7 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_chip_rf(rt2x00dev, value, reg);
rt2x00_print_chip(rt2x00dev);
- if (!rt2x00_rf(&rt2x00dev->chip, RF2420) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2421)) {
+ if (!rt2x00_rf(rt2x00dev, RF2420) && !rt2x00_rf(rt2x00dev, RF2421)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -1562,7 +1561,6 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2400pci_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.get_tsf = rt2400pci_get_tsf,
.tx_last_beacon = rt2400pci_tx_last_beacon,
.rfkill_poll = rt2x00mac_rfkill_poll,
@@ -1643,7 +1641,7 @@ static const struct rt2x00_ops rt2400pci_ops = {
/*
* RT2400pci module information.
*/
-static struct pci_device_id rt2400pci_device_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(rt2400pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x0101), PCI_DEVICE_DATA(&rt2400pci_ops) },
{ 0, }
};
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 408fcfc120f5..f6440bb0e5f6 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -440,8 +440,7 @@ static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev,
/*
* RT2525E and RT5222 need to flip TX I/Q
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525E) ||
- rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) {
rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1);
rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 1);
rt2x00_set_field32(&reg, BBPCSR1_OFDM_FLIP, 1);
@@ -449,7 +448,7 @@ static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev,
/*
* RT2525E does not need RX I/Q Flip.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525E))
+ if (rt2x00_rf(rt2x00dev, RF2525E))
rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0);
} else {
rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 0);
@@ -475,14 +474,14 @@ static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev,
* Switch on tuning bits.
* For RT2523 devices we do not need to update the R1 register.
*/
- if (!rt2x00_rf(&rt2x00dev->chip, RF2523))
+ if (!rt2x00_rf(rt2x00dev, RF2523))
rt2x00_set_field32(&rf->rf1, RF1_TUNER, 1);
rt2x00_set_field32(&rf->rf3, RF3_TUNER, 1);
/*
* For RT2525 we should first set the channel to half band higher.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525)) {
+ if (rt2x00_rf(rt2x00dev, RF2525)) {
static const u32 vals[] = {
0x00080cbe, 0x00080d02, 0x00080d06, 0x00080d0a,
0x00080d0e, 0x00080d12, 0x00080d16, 0x00080d1a,
@@ -516,7 +515,7 @@ static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev,
* Switch off tuning bits.
* For RT2523 devices we do not need to update the R1 register.
*/
- if (!rt2x00_rf(&rt2x00dev->chip, RF2523)) {
+ if (!rt2x00_rf(rt2x00dev, RF2523)) {
rt2x00_set_field32(&rf->rf1, RF1_TUNER, 0);
rt2500pci_rf_write(rt2x00dev, 1, rf->rf1);
}
@@ -640,7 +639,7 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev,
* up to version C the link tuning should halt after 20
* seconds while being associated.
*/
- if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D &&
+ if (rt2x00_rev(rt2x00dev) < RT2560_VERSION_D &&
rt2x00dev->intf_associated && count > 20)
return;
@@ -650,7 +649,7 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev,
* should go straight to dynamic CCA tuning when they
* are not associated.
*/
- if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D ||
+ if (rt2x00_rev(rt2x00dev) < RT2560_VERSION_D ||
!rt2x00dev->intf_associated)
goto dynamic_cca_tune;
@@ -1507,12 +1506,12 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_chip_rf(rt2x00dev, value, reg);
rt2x00_print_chip(rt2x00dev);
- if (!rt2x00_rf(&rt2x00dev->chip, RF2522) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2523) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2524) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2525) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2525E) &&
- !rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ if (!rt2x00_rf(rt2x00dev, RF2522) &&
+ !rt2x00_rf(rt2x00dev, RF2523) &&
+ !rt2x00_rf(rt2x00dev, RF2524) &&
+ !rt2x00_rf(rt2x00dev, RF2525) &&
+ !rt2x00_rf(rt2x00dev, RF2525E) &&
+ !rt2x00_rf(rt2x00dev, RF5222)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -1744,22 +1743,22 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (rt2x00_rf(&rt2x00dev->chip, RF2522)) {
+ if (rt2x00_rf(rt2x00dev, RF2522)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
spec->channels = rf_vals_bg_2522;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2523)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2523)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523);
spec->channels = rf_vals_bg_2523;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2524)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2524)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524);
spec->channels = rf_vals_bg_2524;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2525)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2525)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525);
spec->channels = rf_vals_bg_2525;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2525E)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e);
spec->channels = rf_vals_bg_2525e;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ } else if (rt2x00_rf(rt2x00dev, RF5222)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_5222);
spec->channels = rf_vals_5222;
@@ -1860,7 +1859,6 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2x00mac_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.get_tsf = rt2500pci_get_tsf,
.tx_last_beacon = rt2500pci_tx_last_beacon,
.rfkill_poll = rt2x00mac_rfkill_poll,
@@ -1941,7 +1939,7 @@ static const struct rt2x00_ops rt2500pci_ops = {
/*
* RT2500pci module information.
*/
-static struct pci_device_id rt2500pci_device_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(rt2500pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x0201), PCI_DEVICE_DATA(&rt2500pci_ops) },
{ 0, }
};
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 83f2592c59de..81ca4ec068db 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -565,8 +565,7 @@ static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev,
/*
* RT2525E and RT5222 need to flip TX I/Q
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525E) ||
- rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) {
rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1);
rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 1);
rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 1);
@@ -574,7 +573,7 @@ static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev,
/*
* RT2525E does not need RX I/Q Flip.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525E))
+ if (rt2x00_rf(rt2x00dev, RF2525E))
rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0);
} else {
rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 0);
@@ -598,7 +597,7 @@ static void rt2500usb_config_channel(struct rt2x00_dev *rt2x00dev,
/*
* For RT2525E we should first set the channel to half band higher.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) {
+ if (rt2x00_rf(rt2x00dev, RF2525E)) {
static const u32 vals[] = {
0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2,
0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba,
@@ -793,7 +792,7 @@ static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 1);
rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg);
- if (rt2x00_rev(&rt2x00dev->chip) >= RT2570_VERSION_C) {
+ if (rt2x00_rev(rt2x00dev) >= RT2570_VERSION_C) {
rt2500usb_register_read(rt2x00dev, PHY_CSR2, &reg);
rt2x00_set_field16(&reg, PHY_CSR2_LNA, 0);
} else {
@@ -1411,19 +1410,18 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_chip(rt2x00dev, RT2570, value, reg);
rt2x00_print_chip(rt2x00dev);
- if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0) ||
- rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) {
-
+ if (!rt2x00_check_rev(rt2x00dev, 0x000ffff0, 0) ||
+ rt2x00_check_rev(rt2x00dev, 0x0000000f, 0)) {
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
return -ENODEV;
}
- if (!rt2x00_rf(&rt2x00dev->chip, RF2522) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2523) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2524) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2525) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2525E) &&
- !rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ if (!rt2x00_rf(rt2x00dev, RF2522) &&
+ !rt2x00_rf(rt2x00dev, RF2523) &&
+ !rt2x00_rf(rt2x00dev, RF2524) &&
+ !rt2x00_rf(rt2x00dev, RF2525) &&
+ !rt2x00_rf(rt2x00dev, RF2525E) &&
+ !rt2x00_rf(rt2x00dev, RF5222)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -1667,22 +1665,22 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (rt2x00_rf(&rt2x00dev->chip, RF2522)) {
+ if (rt2x00_rf(rt2x00dev, RF2522)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
spec->channels = rf_vals_bg_2522;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2523)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2523)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523);
spec->channels = rf_vals_bg_2523;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2524)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2524)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524);
spec->channels = rf_vals_bg_2524;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2525)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2525)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525);
spec->channels = rf_vals_bg_2525;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2525E)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e);
spec->channels = rf_vals_bg_2525e;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ } else if (rt2x00_rf(rt2x00dev, RF5222)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_5222);
spec->channels = rf_vals_5222;
@@ -1763,7 +1761,6 @@ static const struct ieee80211_ops rt2500usb_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2x00mac_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.rfkill_poll = rt2x00mac_rfkill_poll,
};
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 9deae41cb784..a45e027f2d1f 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -37,7 +37,7 @@
#include <linux/module.h>
#include "rt2x00.h"
-#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE)
+#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
#include "rt2x00usb.h"
#endif
#include "rt2800lib.h"
@@ -220,8 +220,7 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
/*
* RT2880 and RT3052 don't support MCU requests.
*/
- if (rt2x00_rt(&rt2x00dev->chip, RT2880) ||
- rt2x00_rt(&rt2x00dev->chip, RT3052))
+ if (rt2x00_rt(rt2x00dev, RT2880) || rt2x00_rt(rt2x00dev, RT3052))
return;
mutex_lock(&rt2x00dev->csr_mutex);
@@ -246,6 +245,25 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
}
EXPORT_SYMBOL_GPL(rt2800_mcu_request);
+int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
+{
+ unsigned int i;
+ u32 reg;
+
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+ if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
+ !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
+ return 0;
+
+ msleep(1);
+ }
+
+ ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
+ return -EACCES;
+}
+EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready);
+
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
const struct rt2x00debug rt2800_rt2x00debug = {
.owner = THIS_MODULE,
@@ -348,7 +366,7 @@ static int rt2800_blink_set(struct led_classdev *led_cdev,
return 0;
}
-void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
+static void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
struct rt2x00_led *led, enum led_type type)
{
led->rt2x00dev = rt2x00dev;
@@ -357,7 +375,6 @@ void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
led->led_dev.blink_set = rt2800_blink_set;
led->flags = LED_INITIALIZED;
}
-EXPORT_SYMBOL_GPL(rt2800_init_led);
#endif /* CONFIG_RT2X00_LIB_LEDS */
/*
@@ -806,12 +823,12 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
unsigned int tx_pin;
u8 bbp;
- if ((rt2x00_rt(&rt2x00dev->chip, RT3070) ||
- rt2x00_rt(&rt2x00dev->chip, RT3090)) &&
- (rt2x00_rf(&rt2x00dev->chip, RF2020) ||
- rt2x00_rf(&rt2x00dev->chip, RF3020) ||
- rt2x00_rf(&rt2x00dev->chip, RF3021) ||
- rt2x00_rf(&rt2x00dev->chip, RF3022)))
+ if ((rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt(rt2x00dev, RT3090)) &&
+ (rt2x00_rf(rt2x00dev, RF2020) ||
+ rt2x00_rf(rt2x00dev, RF3020) ||
+ rt2x00_rf(rt2x00dev, RF3021) ||
+ rt2x00_rf(rt2x00dev, RF3022)))
rt2800_config_channel_rt3x(rt2x00dev, conf, rf, info);
else
rt2800_config_channel_rt2x(rt2x00dev, conf, rf, info);
@@ -878,7 +895,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf));
rt2800_bbp_write(rt2x00dev, 3, bbp);
- if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
+ if (rt2x00_rev(rt2x00dev) == RT2860C_VERSION) {
if (conf_is_ht40(conf)) {
rt2800_bbp_write(rt2x00dev, 69, 0x1a);
rt2800_bbp_write(rt2x00dev, 70, 0x0a);
@@ -1041,7 +1058,7 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
{
if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
if (rt2x00_intf_is_usb(rt2x00dev) &&
- rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION)
+ rt2x00_rev(rt2x00dev) == RT3070_VERSION)
return 0x1c + (2 * rt2x00dev->lna_gain);
else
return 0x2e + rt2x00dev->lna_gain;
@@ -1072,7 +1089,7 @@ EXPORT_SYMBOL_GPL(rt2800_reset_tuner);
void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
const u32 count)
{
- if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION)
+ if (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)
return;
/*
@@ -1121,7 +1138,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
if (rt2x00_intf_is_usb(rt2x00dev)) {
rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
-#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE)
+#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
USB_MODE_RESET, REGISTER_TIMEOUT);
#endif
@@ -1158,7 +1175,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
if (rt2x00_intf_is_usb(rt2x00dev) &&
- rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
+ rt2x00_rev(rt2x00dev) == RT3070_VERSION) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -1185,8 +1202,8 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
- if (rt2x00_rev(&rt2x00dev->chip) >= RT2880E_VERSION &&
- rt2x00_rev(&rt2x00dev->chip) < RT3070_VERSION)
+ if (rt2x00_rev(rt2x00dev) >= RT2880E_VERSION &&
+ rt2x00_rev(rt2x00dev) < RT3070_VERSION)
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
else
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
@@ -1465,22 +1482,22 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 103, 0x00);
rt2800_bbp_write(rt2x00dev, 105, 0x05);
- if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
+ if (rt2x00_rev(rt2x00dev) == RT2860C_VERSION) {
rt2800_bbp_write(rt2x00dev, 69, 0x16);
rt2800_bbp_write(rt2x00dev, 73, 0x12);
}
- if (rt2x00_rev(&rt2x00dev->chip) > RT2860D_VERSION)
+ if (rt2x00_rev(rt2x00dev) > RT2860D_VERSION)
rt2800_bbp_write(rt2x00dev, 84, 0x19);
if (rt2x00_intf_is_usb(rt2x00dev) &&
- rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
+ rt2x00_rev(rt2x00dev) == RT3070_VERSION) {
rt2800_bbp_write(rt2x00dev, 70, 0x0a);
rt2800_bbp_write(rt2x00dev, 84, 0x99);
rt2800_bbp_write(rt2x00dev, 105, 0x05);
}
- if (rt2x00_rt(&rt2x00dev->chip, RT3052)) {
+ if (rt2x00_rt(rt2x00dev, RT3052)) {
rt2800_bbp_write(rt2x00dev, 31, 0x08);
rt2800_bbp_write(rt2x00dev, 78, 0x0e);
rt2800_bbp_write(rt2x00dev, 80, 0x08);
@@ -1566,13 +1583,13 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
u8 bbp;
if (rt2x00_intf_is_usb(rt2x00dev) &&
- rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION)
+ rt2x00_rev(rt2x00dev) != RT3070_VERSION)
return 0;
if (rt2x00_intf_is_pci(rt2x00dev)) {
- if (!rt2x00_rf(&rt2x00dev->chip, RF3020) &&
- !rt2x00_rf(&rt2x00dev->chip, RF3021) &&
- !rt2x00_rf(&rt2x00dev->chip, RF3022))
+ if (!rt2x00_rf(rt2x00dev, RF3020) &&
+ !rt2x00_rf(rt2x00dev, RF3021) &&
+ !rt2x00_rf(rt2x00dev, RF3022))
return 0;
}
@@ -1737,7 +1754,7 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820);
rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
- } else if (rt2x00_rev(&rt2x00dev->chip) < RT2883_VERSION) {
+ } else if (rt2x00_rev(rt2x00dev) < RT2883_VERSION) {
/*
* There is a max of 2 RX streams for RT28x0 series
*/
@@ -1839,17 +1856,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_chip_rf(rt2x00dev, value, reg);
if (rt2x00_intf_is_usb(rt2x00dev)) {
- struct rt2x00_chip *chip = &rt2x00dev->chip;
-
/*
* The check for rt2860 is not a typo, some rt2870 hardware
* identifies itself as rt2860 in the CSR register.
*/
- if (rt2x00_check_rev(chip, 0xfff00000, 0x28600000) ||
- rt2x00_check_rev(chip, 0xfff00000, 0x28700000) ||
- rt2x00_check_rev(chip, 0xfff00000, 0x28800000)) {
+ if (rt2x00_check_rev(rt2x00dev, 0xfff00000, 0x28600000) ||
+ rt2x00_check_rev(rt2x00dev, 0xfff00000, 0x28700000) ||
+ rt2x00_check_rev(rt2x00dev, 0xfff00000, 0x28800000)) {
rt2x00_set_chip_rt(rt2x00dev, RT2870);
- } else if (rt2x00_check_rev(chip, 0xffff0000, 0x30700000)) {
+ } else if (rt2x00_check_rev(rt2x00dev, 0xffff0000, 0x30700000)) {
rt2x00_set_chip_rt(rt2x00dev, RT3070);
} else {
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
@@ -1858,14 +1873,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
}
rt2x00_print_chip(rt2x00dev);
- if (!rt2x00_rf(&rt2x00dev->chip, RF2820) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2850) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2720) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2750) &&
- !rt2x00_rf(&rt2x00dev->chip, RF3020) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2020) &&
- !rt2x00_rf(&rt2x00dev->chip, RF3021) &&
- !rt2x00_rf(&rt2x00dev->chip, RF3022)) {
+ if (!rt2x00_rf(rt2x00dev, RF2820) &&
+ !rt2x00_rf(rt2x00dev, RF2850) &&
+ !rt2x00_rf(rt2x00dev, RF2720) &&
+ !rt2x00_rf(rt2x00dev, RF2750) &&
+ !rt2x00_rf(rt2x00dev, RF3020) &&
+ !rt2x00_rf(rt2x00dev, RF2020) &&
+ !rt2x00_rf(rt2x00dev, RF3021) &&
+ !rt2x00_rf(rt2x00dev, RF3022) &&
+ !rt2x00_rf(rt2x00dev, RF3052)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -2013,7 +2029,6 @@ static const struct rf_channel rf_vals_302x[] = {
int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
- struct rt2x00_chip *chip = &rt2x00dev->chip;
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
char *tx_power1;
@@ -2049,19 +2064,19 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (rt2x00_rf(chip, RF2820) ||
- rt2x00_rf(chip, RF2720) ||
- (rt2x00_intf_is_pci(rt2x00dev) && rt2x00_rf(chip, RF3052))) {
+ if (rt2x00_rf(rt2x00dev, RF2820) ||
+ rt2x00_rf(rt2x00dev, RF2720) ||
+ rt2x00_rf(rt2x00dev, RF3052)) {
spec->num_channels = 14;
spec->channels = rf_vals;
- } else if (rt2x00_rf(chip, RF2850) || rt2x00_rf(chip, RF2750)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2850) || rt2x00_rf(rt2x00dev, RF2750)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals);
spec->channels = rf_vals;
- } else if (rt2x00_rf(chip, RF3020) ||
- rt2x00_rf(chip, RF2020) ||
- rt2x00_rf(chip, RF3021) ||
- rt2x00_rf(chip, RF3022)) {
+ } else if (rt2x00_rf(rt2x00dev, RF3020) ||
+ rt2x00_rf(rt2x00dev, RF2020) ||
+ rt2x00_rf(rt2x00dev, RF3021) ||
+ rt2x00_rf(rt2x00dev, RF3022)) {
spec->num_channels = ARRAY_SIZE(rf_vals_302x);
spec->channels = rf_vals_302x;
}
@@ -2069,7 +2084,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Initialize HT information.
*/
- if (!rt2x00_rf(chip, RF2020))
+ if (!rt2x00_rf(rt2x00dev, RF2020))
spec->ht.ht_supported = true;
else
spec->ht.ht_supported = false;
@@ -2282,7 +2297,6 @@ const struct ieee80211_ops rt2800_mac80211_ops = {
.set_rts_threshold = rt2800_set_rts_threshold,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2800_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.get_tsf = rt2800_get_tsf,
.rfkill_poll = rt2x00mac_rfkill_poll,
};
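
The rt2800_wait_wpdma_ready() helper added above is exported so the bus-specific drivers can drop their duplicated copies (removed from rt2800pci.c and rt2800usb.c further down). A minimal sketch of how a bus driver's radio-enable path uses it, condensed from the rt2800usb hunk below:

static int example_enable_radio(struct rt2x00_dev *rt2x00dev)
{
	/*
	 * Wait for both the TX and RX DMA engines to go idle before
	 * (re)initializing registers; the helper polls WPDMA_GLO_CFG
	 * up to REGISTER_BUSY_COUNT times with a 1 ms sleep in between.
	 */
	if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
		     rt2800_init_registers(rt2x00dev) ||
		     rt2800_init_bbp(rt2x00dev) ||
		     rt2800_init_rfcsr(rt2x00dev)))
		return -EIO;

	return 0;
}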
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 535ce22f2ac8..ebabeae62d1b 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -114,8 +114,6 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
extern const struct rt2x00debug rt2800_rt2x00debug;
int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev);
-void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
- struct rt2x00_led *led, enum led_type type);
int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_crypto *crypto,
struct ieee80211_key_conf *key);
@@ -139,6 +137,7 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
int rt2800_init_registers(struct rt2x00_dev *rt2x00dev);
int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev);
int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev);
+int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev);
int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev);
void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index dfc886fcb44d..d64181cbc9cb 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -48,14 +48,6 @@
#include "rt2800.h"
#include "rt2800pci.h"
-#ifdef CONFIG_RT2800PCI_PCI_MODULE
-#define CONFIG_RT2800PCI_PCI
-#endif
-
-#ifdef CONFIG_RT2800PCI_WISOC_MODULE
-#define CONFIG_RT2800PCI_WISOC
-#endif
-
/*
* Allow hardware encryption to be disabled.
*/
@@ -87,7 +79,7 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
}
-#ifdef CONFIG_RT2800PCI_WISOC
+#ifdef CONFIG_RT2800PCI_SOC
static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
{
u32 *base_addr = (u32 *) KSEG1ADDR(0x1F040000); /* XXX for RT3052 */
@@ -98,7 +90,7 @@ static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
{
}
-#endif /* CONFIG_RT2800PCI_WISOC */
+#endif /* CONFIG_RT2800PCI_SOC */
#ifdef CONFIG_RT2800PCI_PCI
static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
@@ -461,24 +453,6 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
}
-static int rt2800pci_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
-{
- unsigned int i;
- u32 reg;
-
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
- if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
- !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
- return 0;
-
- msleep(1);
- }
-
- ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
- return -EACCES;
-}
-
static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
@@ -487,10 +461,10 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all registers.
*/
- if (unlikely(rt2800pci_wait_wpdma_ready(rt2x00dev) ||
+ if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800pci_init_queues(rt2x00dev) ||
rt2800_init_registers(rt2x00dev) ||
- rt2800pci_wait_wpdma_ready(rt2x00dev) ||
+ rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800_init_bbp(rt2x00dev) ||
rt2800_init_rfcsr(rt2x00dev)))
return -EIO;
@@ -570,7 +544,7 @@ static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
/* Wait for DMA, ignore error */
- rt2800pci_wait_wpdma_ready(rt2x00dev);
+ rt2800_wait_wpdma_ready(rt2x00dev);
}
static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
@@ -835,7 +809,6 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
struct rxdone_entry_desc *rxdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
__le32 *rxd = entry_priv->desc;
__le32 *rxwi = (__le32 *)entry->skb->data;
@@ -883,10 +856,8 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
if (rt2x00_get_field32(rxd3, RXD_W3_MY_BSS))
rxdesc->dev_flags |= RXDONE_MY_BSS;
- if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD)) {
+ if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD))
rxdesc->dev_flags |= RXDONE_L2PAD;
- skbdesc->flags |= SKBDESC_L2_PADDED;
- }
if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
rxdesc->flags |= RX_FLAG_SHORT_GI;
@@ -927,7 +898,6 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
* Remove TXWI descriptor from start of buffer.
*/
skb_pull(entry->skb, RXWI_DESC_SIZE);
- skb_trim(entry->skb, rxdesc->size);
}
/*
@@ -1133,8 +1103,7 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* This device requires firmware.
*/
- if (!rt2x00_rt(&rt2x00dev->chip, RT2880) &&
- !rt2x00_rt(&rt2x00dev->chip, RT3052))
+ if (!rt2x00_rt(rt2x00dev, RT2880) && !rt2x00_rt(rt2x00dev, RT3052))
__set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
@@ -1221,7 +1190,7 @@ static const struct rt2x00_ops rt2800pci_ops = {
/*
* RT2800pci module information.
*/
-static struct pci_device_id rt2800pci_device_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) },
@@ -1255,7 +1224,7 @@ MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
#endif /* CONFIG_RT2800PCI_PCI */
MODULE_LICENSE("GPL");
-#ifdef CONFIG_RT2800PCI_WISOC
+#ifdef CONFIG_RT2800PCI_SOC
#if defined(CONFIG_RALINK_RT288X)
__rt2x00soc_probe(RT2880, &rt2800pci_ops);
#elif defined(CONFIG_RALINK_RT305X)
@@ -1273,7 +1242,7 @@ static struct platform_driver rt2800soc_driver = {
.suspend = rt2x00soc_suspend,
.resume = rt2x00soc_resume,
};
-#endif /* CONFIG_RT2800PCI_WISOC */
+#endif /* CONFIG_RT2800PCI_SOC */
#ifdef CONFIG_RT2800PCI_PCI
static struct pci_driver rt2800pci_driver = {
@@ -1290,7 +1259,7 @@ static int __init rt2800pci_init(void)
{
int ret = 0;
-#ifdef CONFIG_RT2800PCI_WISOC
+#ifdef CONFIG_RT2800PCI_SOC
ret = platform_driver_register(&rt2800soc_driver);
if (ret)
return ret;
@@ -1298,7 +1267,7 @@ static int __init rt2800pci_init(void)
#ifdef CONFIG_RT2800PCI_PCI
ret = pci_register_driver(&rt2800pci_driver);
if (ret) {
-#ifdef CONFIG_RT2800PCI_WISOC
+#ifdef CONFIG_RT2800PCI_SOC
platform_driver_unregister(&rt2800soc_driver);
#endif
return ret;
@@ -1313,7 +1282,7 @@ static void __exit rt2800pci_exit(void)
#ifdef CONFIG_RT2800PCI_PCI
pci_unregister_driver(&rt2800pci_driver);
#endif
-#ifdef CONFIG_RT2800PCI_WISOC
+#ifdef CONFIG_RT2800PCI_SOC
platform_driver_unregister(&rt2800soc_driver);
#endif
}
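
DEFINE_PCI_DEVICE_TABLE(), used above instead of a plain static array, is a <linux/pci.h> convenience macro; at the time of this patch it roughly expanded to a const table placed in the __devinitconst section (approximate expansion, for reference only):

#define DEFINE_PCI_DEVICE_TABLE(_table) \
	const struct pci_device_id _table[] __devinitconst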
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index ab95346cf6a3..82755cf8b73e 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -92,7 +92,7 @@ static bool rt2800usb_check_crc(const u8 *data, const size_t len)
static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev,
const u8 *data, const size_t len)
{
- u16 chipset = (rt2x00_rev(&rt2x00dev->chip) >> 16) & 0xffff;
+ u16 chipset = (rt2x00_rev(rt2x00dev) >> 16) & 0xffff;
size_t offset = 0;
/*
@@ -138,7 +138,7 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
u32 reg;
u32 offset;
u32 length;
- u16 chipset = (rt2x00_rev(&rt2x00dev->chip) >> 16) & 0xffff;
+ u16 chipset = (rt2x00_rev(rt2x00dev) >> 16) & 0xffff;
/*
* Check which section of the firmware we need.
@@ -248,24 +248,6 @@ static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
}
-static int rt2800usb_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
-{
- unsigned int i;
- u32 reg;
-
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
- if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
- !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
- return 0;
-
- msleep(1);
- }
-
- ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
- return -EACCES;
-}
-
static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
@@ -274,7 +256,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all registers.
*/
- if (unlikely(rt2800usb_wait_wpdma_ready(rt2x00dev) ||
+ if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800_init_registers(rt2x00dev) ||
rt2800_init_bbp(rt2x00dev) ||
rt2800_init_rfcsr(rt2x00dev)))
@@ -295,9 +277,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, USB_DMA_CFG, &reg);
rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0);
- /* Don't use bulk in aggregation when working with USB 1.1 */
- rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN,
- (rt2x00dev->rx->usb_maxpacket == 512));
+ rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN, 0);
rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_TIMEOUT, 128);
/*
* Total room for RX frames in kilobytes, PBF might still exceed
@@ -346,7 +326,7 @@ static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
/* Wait for DMA, ignore error */
- rt2800usb_wait_wpdma_ready(rt2x00dev);
+ rt2800_wait_wpdma_ready(rt2x00dev);
rt2x00usb_disable_radio(rt2x00dev);
}
@@ -573,41 +553,57 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
- __le32 *rxd = (__le32 *)entry->skb->data;
+ __le32 *rxi = (__le32 *)entry->skb->data;
__le32 *rxwi;
- u32 rxd0;
+ __le32 *rxd;
+ u32 rxi0;
u32 rxwi0;
u32 rxwi1;
u32 rxwi2;
u32 rxwi3;
+ u32 rxd0;
+ int rx_pkt_len;
+
+ /*
+ * RX frame format is :
+ * | RXINFO | RXWI | header | L2 pad | payload | pad | RXD | USB pad |
+ * |<------------ rx_pkt_len -------------->|
+ */
+ rt2x00_desc_read(rxi, 0, &rxi0);
+ rx_pkt_len = rt2x00_get_field32(rxi0, RXINFO_W0_USB_DMA_RX_PKT_LEN);
+
+ rxwi = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE);
+
+ /*
+ * FIXME : we need to check for rx_pkt_len validity
+ */
+ rxd = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE + rx_pkt_len);
/*
* Copy descriptor to the skbdesc->desc buffer, making it safe from
* moving of frame data in rt2x00usb.
*/
- memcpy(skbdesc->desc, rxd, skbdesc->desc_len);
- rxd = (__le32 *)skbdesc->desc;
- rxwi = &rxd[RXINFO_DESC_SIZE / sizeof(__le32)];
+ memcpy(skbdesc->desc, rxi, skbdesc->desc_len);
/*
* It is now safe to read the descriptor on all architectures.
*/
- rt2x00_desc_read(rxd, 0, &rxd0);
rt2x00_desc_read(rxwi, 0, &rxwi0);
rt2x00_desc_read(rxwi, 1, &rxwi1);
rt2x00_desc_read(rxwi, 2, &rxwi2);
rt2x00_desc_read(rxwi, 3, &rxwi3);
+ rt2x00_desc_read(rxd, 0, &rxd0);
- if (rt2x00_get_field32(rxd0, RXINFO_W0_CRC_ERROR))
+ if (rt2x00_get_field32(rxd0, RXD_W0_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
rxdesc->cipher_status =
- rt2x00_get_field32(rxd0, RXINFO_W0_CIPHER_ERROR);
+ rt2x00_get_field32(rxd0, RXD_W0_CIPHER_ERROR);
}
- if (rt2x00_get_field32(rxd0, RXINFO_W0_DECRYPTED)) {
+ if (rt2x00_get_field32(rxd0, RXD_W0_DECRYPTED)) {
/*
* Hardware has stripped IV/EIV data from 802.11 frame during
* decryption. Unfortunately the descriptor doesn't contain
@@ -622,13 +618,11 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
rxdesc->flags |= RX_FLAG_MMIC_ERROR;
}
- if (rt2x00_get_field32(rxd0, RXINFO_W0_MY_BSS))
+ if (rt2x00_get_field32(rxd0, RXD_W0_MY_BSS))
rxdesc->dev_flags |= RXDONE_MY_BSS;
- if (rt2x00_get_field32(rxd0, RXINFO_W0_L2PAD)) {
+ if (rt2x00_get_field32(rxd0, RXD_W0_L2PAD))
rxdesc->dev_flags |= RXDONE_L2PAD;
- skbdesc->flags |= SKBDESC_L2_PADDED;
- }
if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
rxdesc->flags |= RX_FLAG_SHORT_GI;
@@ -663,7 +657,6 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
* Remove RXWI descriptor from start of buffer.
*/
skb_pull(entry->skb, skbdesc->desc_len);
- skb_trim(entry->skb, rxdesc->size);
}
/*
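
With the new rt2800usb RX layout the trailing RXD word is found from the RXINFO length field rather than at a fixed offset. A worked example of the offset arithmetic, using the descriptor sizes defined in rt2800usb.h below:

/*
 * Example: RXINFO word 0 reports rx_pkt_len = 64.
 *
 *   offset 0               RXINFO   (RXINFO_DESC_SIZE = 4 bytes)
 *   offset 4               RXWI, 802.11 header, L2 pad, payload
 *   offset 4 + 64 = 68     RXD      (RXD_DESC_SIZE = 4 bytes)
 *
 * which matches the pointer computed in rt2800usb_fill_rxdone():
 */
rxd = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE + rx_pkt_len);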
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index 1e4340a182ef..d1d8ae94b4d4 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -79,6 +79,8 @@
*/
#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
+#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) )
+#define RXD_DESC_SIZE ( 1 * sizeof(__le32) )
/*
* TX Info structure
@@ -101,6 +103,54 @@
#define TXINFO_W0_USB_DMA_TX_BURST FIELD32(0x80000000)
/*
+ * RX Info structure
+ */
+
+/*
+ * Word 0
+ */
+
+#define RXINFO_W0_USB_DMA_RX_PKT_LEN FIELD32(0x0000ffff)
+
+/*
+ * RX WI structure
+ */
+
+/*
+ * Word0
+ */
+#define RXWI_W0_WIRELESS_CLI_ID FIELD32(0x000000ff)
+#define RXWI_W0_KEY_INDEX FIELD32(0x00000300)
+#define RXWI_W0_BSSID FIELD32(0x00001c00)
+#define RXWI_W0_UDF FIELD32(0x0000e000)
+#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
+#define RXWI_W0_TID FIELD32(0xf0000000)
+
+/*
+ * Word1
+ */
+#define RXWI_W1_FRAG FIELD32(0x0000000f)
+#define RXWI_W1_SEQUENCE FIELD32(0x0000fff0)
+#define RXWI_W1_MCS FIELD32(0x007f0000)
+#define RXWI_W1_BW FIELD32(0x00800000)
+#define RXWI_W1_SHORT_GI FIELD32(0x01000000)
+#define RXWI_W1_STBC FIELD32(0x06000000)
+#define RXWI_W1_PHYMODE FIELD32(0xc0000000)
+
+/*
+ * Word2
+ */
+#define RXWI_W2_RSSI0 FIELD32(0x000000ff)
+#define RXWI_W2_RSSI1 FIELD32(0x0000ff00)
+#define RXWI_W2_RSSI2 FIELD32(0x00ff0000)
+
+/*
+ * Word3
+ */
+#define RXWI_W3_SNR0 FIELD32(0x000000ff)
+#define RXWI_W3_SNR1 FIELD32(0x0000ff00)
+
+/*
* RX descriptor format for RX Ring.
*/
@@ -115,25 +165,25 @@
* AMSDU: rx with 802.3 header, not 802.11 header.
*/
-#define RXINFO_W0_BA FIELD32(0x00000001)
-#define RXINFO_W0_DATA FIELD32(0x00000002)
-#define RXINFO_W0_NULLDATA FIELD32(0x00000004)
-#define RXINFO_W0_FRAG FIELD32(0x00000008)
-#define RXINFO_W0_UNICAST_TO_ME FIELD32(0x00000010)
-#define RXINFO_W0_MULTICAST FIELD32(0x00000020)
-#define RXINFO_W0_BROADCAST FIELD32(0x00000040)
-#define RXINFO_W0_MY_BSS FIELD32(0x00000080)
-#define RXINFO_W0_CRC_ERROR FIELD32(0x00000100)
-#define RXINFO_W0_CIPHER_ERROR FIELD32(0x00000600)
-#define RXINFO_W0_AMSDU FIELD32(0x00000800)
-#define RXINFO_W0_HTC FIELD32(0x00001000)
-#define RXINFO_W0_RSSI FIELD32(0x00002000)
-#define RXINFO_W0_L2PAD FIELD32(0x00004000)
-#define RXINFO_W0_AMPDU FIELD32(0x00008000)
-#define RXINFO_W0_DECRYPTED FIELD32(0x00010000)
-#define RXINFO_W0_PLCP_RSSI FIELD32(0x00020000)
-#define RXINFO_W0_CIPHER_ALG FIELD32(0x00040000)
-#define RXINFO_W0_LAST_AMSDU FIELD32(0x00080000)
-#define RXINFO_W0_PLCP_SIGNAL FIELD32(0xfff00000)
+#define RXD_W0_BA FIELD32(0x00000001)
+#define RXD_W0_DATA FIELD32(0x00000002)
+#define RXD_W0_NULLDATA FIELD32(0x00000004)
+#define RXD_W0_FRAG FIELD32(0x00000008)
+#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000010)
+#define RXD_W0_MULTICAST FIELD32(0x00000020)
+#define RXD_W0_BROADCAST FIELD32(0x00000040)
+#define RXD_W0_MY_BSS FIELD32(0x00000080)
+#define RXD_W0_CRC_ERROR FIELD32(0x00000100)
+#define RXD_W0_CIPHER_ERROR FIELD32(0x00000600)
+#define RXD_W0_AMSDU FIELD32(0x00000800)
+#define RXD_W0_HTC FIELD32(0x00001000)
+#define RXD_W0_RSSI FIELD32(0x00002000)
+#define RXD_W0_L2PAD FIELD32(0x00004000)
+#define RXD_W0_AMPDU FIELD32(0x00008000)
+#define RXD_W0_DECRYPTED FIELD32(0x00010000)
+#define RXD_W0_PLCP_RSSI FIELD32(0x00020000)
+#define RXD_W0_CIPHER_ALG FIELD32(0x00040000)
+#define RXD_W0_LAST_AMSDU FIELD32(0x00080000)
+#define RXD_W0_PLCP_SIGNAL FIELD32(0xfff00000)
#endif /* RT2800USB_H */
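
The RXWI definitions above are FIELD32 mask descriptors, so rt2x00_get_field32() reduces to a mask-and-shift. An open-coded equivalent for one of the new fields, to show what the macro encodes:

/*
 * RXWI_W0_MPDU_TOTAL_BYTE_COUNT is FIELD32(0x0fff0000): mask word 0
 * and shift right by 16 (the position of the lowest mask bit) to get
 * the 12-bit MPDU byte count.
 */
u32 mpdu_len = (rxwi0 & 0x0fff0000) >> 16;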
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index dcfc8c25d1a7..43b70c6e4e9c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -104,6 +104,12 @@
#define GET_DURATION_RES(__size, __rate)(((__size) * 8 * 10) % (__rate))
/*
+ * Determine the number of L2 padding bytes required between the header and
+ * the payload.
+ */
+#define L2PAD_SIZE(__hdrlen) (-(__hdrlen) & 3)
+
+/*
* Determine the alignment requirement,
* to make sure the 802.11 payload is padded to a 4-byte boundary
* we must determine the address of the payload and calculate the
@@ -937,25 +943,25 @@ static inline void rt2x00_print_chip(struct rt2x00_dev *rt2x00dev)
rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev);
}
-static inline char rt2x00_rt(const struct rt2x00_chip *chipset, const u16 chip)
+static inline char rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt)
{
- return (chipset->rt == chip);
+ return (rt2x00dev->chip.rt == rt);
}
-static inline char rt2x00_rf(const struct rt2x00_chip *chipset, const u16 chip)
+static inline char rt2x00_rf(struct rt2x00_dev *rt2x00dev, const u16 rf)
{
- return (chipset->rf == chip);
+ return (rt2x00dev->chip.rf == rf);
}
-static inline u32 rt2x00_rev(const struct rt2x00_chip *chipset)
+static inline u32 rt2x00_rev(struct rt2x00_dev *rt2x00dev)
{
- return chipset->rev;
+ return rt2x00dev->chip.rev;
}
-static inline bool rt2x00_check_rev(const struct rt2x00_chip *chipset,
+static inline bool rt2x00_check_rev(struct rt2x00_dev *rt2x00dev,
const u32 mask, const u32 rev)
{
- return ((chipset->rev & mask) == rev);
+ return ((rt2x00dev->chip.rev & mask) == rev);
}
static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
@@ -964,20 +970,20 @@ static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
rt2x00dev->chip.intf = intf;
}
-static inline bool rt2x00_intf(const struct rt2x00_chip *chipset,
+static inline bool rt2x00_intf(struct rt2x00_dev *rt2x00dev,
enum rt2x00_chip_intf intf)
{
- return (chipset->intf == intf);
+ return (rt2x00dev->chip.intf == intf);
}
static inline bool rt2x00_intf_is_pci(struct rt2x00_dev *rt2x00dev)
{
- return rt2x00_intf(&rt2x00dev->chip, RT2X00_CHIP_INTF_PCI);
+ return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
}
static inline bool rt2x00_intf_is_usb(struct rt2x00_dev *rt2x00dev)
{
- return rt2x00_intf(&rt2x00dev->chip, RT2X00_CHIP_INTF_USB);
+ return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_USB);
}
/**
@@ -1019,9 +1025,9 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
int rt2x00mac_start(struct ieee80211_hw *hw);
void rt2x00mac_stop(struct ieee80211_hw *hw);
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed);
void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
@@ -1038,8 +1044,6 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
#endif /* CONFIG_RT2X00_LIB_CRYPTO */
int rt2x00mac_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats);
-int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats);
void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
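
The L2PAD_SIZE() macro introduced above computes the padding needed between header and payload to restore 4-byte payload alignment: -(__hdrlen) & 3 is equivalent to (4 - __hdrlen % 4) % 4. A few worked values:

/* 24-byte data header:          -(24) & 3 == 0  -> no padding       */
/* 26-byte QoS data header:      -(26) & 3 == 2  -> 2 padding bytes  */
/* 30-byte 4-address header:     -(30) & 3 == 2  -> 2 padding bytes  */
/* 32-byte 4-address QoS header: -(32) & 3 == 0  -> already aligned  */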
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 265e66dba552..b93731b79903 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -385,9 +385,6 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
memset(&rxdesc, 0, sizeof(rxdesc));
rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
- /* Trim buffer to correct size */
- skb_trim(entry->skb, rxdesc.size);
-
/*
* The data behind the ieee80211 header must be
* aligned on a 4 byte boundary.
@@ -404,11 +401,16 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
(rxdesc.flags & RX_FLAG_IV_STRIPPED))
rt2x00crypto_rx_insert_iv(entry->skb, header_length,
&rxdesc);
- else if (rxdesc.dev_flags & RXDONE_L2PAD)
+ else if (header_length &&
+ (rxdesc.size > header_length) &&
+ (rxdesc.dev_flags & RXDONE_L2PAD))
rt2x00queue_remove_l2pad(entry->skb, header_length);
else
rt2x00queue_align_payload(entry->skb, header_length);
+ /* Trim buffer to correct size */
+ skb_trim(entry->skb, rxdesc.size);
+
/*
* Check if the frame was received using HT. In that case,
* the rate is the MCS index and should be passed to mac80211
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index de549c244ed8..abbd857ec759 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -187,10 +187,10 @@ void rt2x00mac_stop(struct ieee80211_hw *hw)
EXPORT_SYMBOL_GPL(rt2x00mac_stop);
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
- struct rt2x00_intf *intf = vif_to_intf(conf->vif);
+ struct rt2x00_intf *intf = vif_to_intf(vif);
struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
struct queue_entry *entry = NULL;
unsigned int i;
@@ -203,7 +203,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
return -ENODEV;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_AP:
/*
* We don't support mixed combinations of
@@ -263,7 +263,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
* increase interface count and start initialization.
*/
- if (conf->type == NL80211_IFTYPE_AP)
+ if (vif->type == NL80211_IFTYPE_AP)
rt2x00dev->intf_ap_count++;
else
rt2x00dev->intf_sta_count++;
@@ -273,16 +273,16 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
mutex_init(&intf->beacon_skb_mutex);
intf->beacon = entry;
- if (conf->type == NL80211_IFTYPE_AP)
- memcpy(&intf->bssid, conf->mac_addr, ETH_ALEN);
- memcpy(&intf->mac, conf->mac_addr, ETH_ALEN);
+ if (vif->type == NL80211_IFTYPE_AP)
+ memcpy(&intf->bssid, vif->addr, ETH_ALEN);
+ memcpy(&intf->mac, vif->addr, ETH_ALEN);
/*
* The MAC address must be configured after the device
* has been initialized. Otherwise the device can reset
* the MAC registers.
*/
- rt2x00lib_config_intf(rt2x00dev, intf, conf->type, intf->mac, NULL);
+ rt2x00lib_config_intf(rt2x00dev, intf, vif->type, intf->mac, NULL);
/*
* Some filters depend on the current working mode. We can force
@@ -296,10 +296,10 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
EXPORT_SYMBOL_GPL(rt2x00mac_add_interface);
void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
- struct rt2x00_intf *intf = vif_to_intf(conf->vif);
+ struct rt2x00_intf *intf = vif_to_intf(vif);
/*
* Don't allow interfaces to be removed while
@@ -307,11 +307,11 @@ void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
* no interface is present.
*/
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
- (conf->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) ||
- (conf->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count))
+ (vif->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) ||
+ (vif->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count))
return;
- if (conf->type == NL80211_IFTYPE_AP)
+ if (vif->type == NL80211_IFTYPE_AP)
rt2x00dev->intf_ap_count--;
else
rt2x00dev->intf_sta_count--;
@@ -555,22 +555,6 @@ int rt2x00mac_get_stats(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL_GPL(rt2x00mac_get_stats);
-int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct rt2x00_dev *rt2x00dev = hw->priv;
- unsigned int i;
-
- for (i = 0; i < rt2x00dev->ops->tx_queues; i++) {
- stats[i].len = rt2x00dev->tx[i].length;
- stats[i].limit = rt2x00dev->tx[i].limit;
- stats[i].count = rt2x00dev->tx[i].count;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rt2x00mac_get_tx_stats);
-
void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
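
The rt2x00mac changes above follow the mac80211 interface API that hands drivers a struct ieee80211_vif directly, so vif->type and vif->addr replace conf->type and conf->mac_addr. A minimal sketch of a callback written against the new signature (hypothetical driver and private structure, not part of this patch):

static int example_add_interface(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif)
{
	struct example_priv *priv = hw->priv;	/* hypothetical */

	if (vif->type != NL80211_IFTYPE_STATION)
		return -EOPNOTSUPP;

	/* The interface MAC address now comes straight from the vif. */
	memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
	return 0;
}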
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 0feb4d0e4668..801be436cf1d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -41,6 +41,9 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
{
unsigned int i;
+ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+ return 0;
+
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
rt2x00pci_register_read(rt2x00dev, offset, reg);
if (!rt2x00_get_field32(*reg, field))
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 9915a09141ef..0b4801a14601 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -177,55 +177,45 @@ void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- unsigned int frame_length = skb->len;
+ unsigned int payload_length = skb->len - header_length;
unsigned int header_align = ALIGN_SIZE(skb, 0);
unsigned int payload_align = ALIGN_SIZE(skb, header_length);
- unsigned int l2pad = 4 - (payload_align - header_align);
+ unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
- if (header_align == payload_align) {
- /*
- * Both header and payload must be moved the same
- * amount of bytes to align them properly. This means
- * we don't use the L2 padding but just move the entire
- * frame.
- */
- rt2x00queue_align_frame(skb);
- } else if (!payload_align) {
- /*
- * Simple L2 padding, only the header needs to be moved,
- * the payload is already properly aligned.
- */
- skb_push(skb, header_align);
- memmove(skb->data, skb->data + header_align, frame_length);
- skbdesc->flags |= SKBDESC_L2_PADDED;
- } else {
- /*
- *
- * Complicated L2 padding, both header and payload need
- * to be moved. By default we only move to the start
- * of the buffer, so our header alignment needs to be
- * increased if there is not enough room for the header
- * to be moved.
- */
- if (payload_align > header_align)
- header_align += 4;
+ /*
+ * Adjust the header alignment if the payload needs to be moved more
+ * than the header.
+ */
+ if (payload_align > header_align)
+ header_align += 4;
+
+ /* There is nothing to do if no alignment is needed */
+ if (!header_align)
+ return;
+
+ /* Reserve the amount of space needed in front of the frame */
+ skb_push(skb, header_align);
+
+ /*
+ * Move the header.
+ */
+ memmove(skb->data, skb->data + header_align, header_length);
- skb_push(skb, header_align);
- memmove(skb->data, skb->data + header_align, header_length);
+ /* Move the payload, if present and if required */
+ if (payload_length && payload_align)
memmove(skb->data + header_length + l2pad,
skb->data + header_length + l2pad + payload_align,
- frame_length - header_length);
- skbdesc->flags |= SKBDESC_L2_PADDED;
- }
+ payload_length);
+
+ /* Trim the skb to the correct size */
+ skb_trim(skb, header_length + l2pad + payload_length);
}
void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- unsigned int l2pad = 4 - (header_length & 3);
+ unsigned int l2pad = L2PAD_SIZE(header_length);
- if (!l2pad || (skbdesc->flags & SKBDESC_L2_PADDED))
+ if (!l2pad)
return;
memmove(skb->data + l2pad, skb->data, header_length);
@@ -346,7 +336,9 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
* Header and alignment information.
*/
txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
- txdesc->l2pad = ALIGN_SIZE(entry->skb, txdesc->header_length);
+ if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags) &&
+ (entry->skb->len > txdesc->header_length))
+ txdesc->l2pad = L2PAD_SIZE(txdesc->header_length);
/*
* Check whether this frame is to be acked.
@@ -387,10 +379,13 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
/*
* Beacons and probe responses require the tsf timestamp
- * to be inserted into the frame.
+ * to be inserted into the frame, except for frames that have been injected
+ * through a monitor interface; the exception is needed so that injection
+ * through a monitor interface can be tested.
*/
- if (ieee80211_is_beacon(hdr->frame_control) ||
- ieee80211_is_probe_resp(hdr->frame_control))
+ if ((ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control)) &&
+ (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
/*
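
Whatever alignment case the rewritten rt2x00queue_insert_l2pad() hits, the final skb_trim() leaves the frame in one canonical shape:

/*
 * Resulting buffer layout (lengths as computed in the function above):
 *
 *   skb->data                                            skb->len
 *   |<- header_length ->|<- l2pad ->|<- payload_length ->|
 *
 * i.e. skb_trim(skb, header_length + l2pad + payload_length);
 */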
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 70775e5ba1ac..c1e482bb37b3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -92,8 +92,6 @@ enum data_queue_qid {
* @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
* @SKBDESC_IV_STRIPPED: Frame contained a IV/EIV provided by
* mac80211 but was stripped for processing by the driver.
- * @SKBDESC_L2_PADDED: Payload has been padded for 4-byte alignment,
- * the padded bytes are located between header and payload.
* @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211,
* don't try to pass it back.
*/
@@ -101,8 +99,7 @@ enum skb_frame_desc_flags {
SKBDESC_DMA_MAPPED_RX = 1 << 0,
SKBDESC_DMA_MAPPED_TX = 1 << 1,
SKBDESC_IV_STRIPPED = 1 << 2,
- SKBDESC_L2_PADDED = 1 << 3,
- SKBDESC_NOT_MAC80211 = 1 << 4,
+ SKBDESC_NOT_MAC80211 = 1 << 3,
};
/**
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 0ca589306d71..74de53e68b4e 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -637,8 +637,7 @@ static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
rt61pci_bbp_read(rt2x00dev, 4, &r4);
rt61pci_bbp_read(rt2x00dev, 77, &r77);
- rt2x00_set_field8(&r3, BBP_R3_SMART_MODE,
- rt2x00_rf(&rt2x00dev->chip, RF5325));
+ rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF5325));
/*
* Configure the RX antenna.
@@ -684,8 +683,7 @@ static void rt61pci_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
rt61pci_bbp_read(rt2x00dev, 4, &r4);
rt61pci_bbp_read(rt2x00dev, 77, &r77);
- rt2x00_set_field8(&r3, BBP_R3_SMART_MODE,
- rt2x00_rf(&rt2x00dev->chip, RF2529));
+ rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529));
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
!test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags));
@@ -833,12 +831,11 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
rt2x00pci_register_write(rt2x00dev, PHY_CSR0, reg);
- if (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF5325))
+ if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325))
rt61pci_config_antenna_5x(rt2x00dev, ant);
- else if (rt2x00_rf(&rt2x00dev->chip, RF2527))
+ else if (rt2x00_rf(rt2x00dev, RF2527))
rt61pci_config_antenna_2x(rt2x00dev, ant);
- else if (rt2x00_rf(&rt2x00dev->chip, RF2529)) {
+ else if (rt2x00_rf(rt2x00dev, RF2529)) {
if (test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags))
rt61pci_config_antenna_2x(rt2x00dev, ant);
else
@@ -879,8 +876,7 @@ static void rt61pci_config_channel(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
- smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF2527));
+ smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527));
rt61pci_bbp_read(rt2x00dev, 3, &r3);
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
@@ -2302,10 +2298,10 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_chip_rf(rt2x00dev, value, reg);
rt2x00_print_chip(rt2x00dev);
- if (!rt2x00_rf(&rt2x00dev->chip, RF5225) &&
- !rt2x00_rf(&rt2x00dev->chip, RF5325) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2527) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2529)) {
+ if (!rt2x00_rf(rt2x00dev, RF5225) &&
+ !rt2x00_rf(rt2x00dev, RF5325) &&
+ !rt2x00_rf(rt2x00dev, RF2527) &&
+ !rt2x00_rf(rt2x00dev, RF2529)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -2360,7 +2356,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
* the antenna settings should be gathered from the NIC
* eeprom word.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2529) &&
+ if (rt2x00_rf(rt2x00dev, RF2529) &&
!test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) {
rt2x00dev->default_ant.rx =
ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED);
@@ -2571,8 +2567,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->channels = rf_vals_seq;
}
- if (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF5325)) {
+ if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_seq);
}
@@ -2735,7 +2730,6 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt61pci_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.get_tsf = rt61pci_get_tsf,
.rfkill_poll = rt2x00mac_rfkill_poll,
};
@@ -2812,7 +2806,7 @@ static const struct rt2x00_ops rt61pci_ops = {
/*
* RT61pci module information.
*/
-static struct pci_device_id rt61pci_device_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(rt61pci_device_table) = {
/* RT2561s */
{ PCI_DEVICE(0x1814, 0x0301), PCI_DEVICE_DATA(&rt61pci_ops) },
/* RT2561 v2 */
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index ced3b6ab5e16..3781eb7b4aa0 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -136,8 +136,8 @@ static void rt73usb_rf_write(struct rt2x00_dev *rt2x00dev,
* all others contain 20 bits.
*/
rt2x00_set_field32(&reg, PHY_CSR4_NUMBER_OF_BITS,
- 20 + (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF2527)));
+ 20 + (rt2x00_rf(rt2x00dev, RF5225) ||
+ rt2x00_rf(rt2x00dev, RF2527)));
rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0);
rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1);
@@ -741,11 +741,9 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg);
- if (rt2x00_rf(&rt2x00dev->chip, RF5226) ||
- rt2x00_rf(&rt2x00dev->chip, RF5225))
+ if (rt2x00_rf(rt2x00dev, RF5226) || rt2x00_rf(rt2x00dev, RF5225))
rt73usb_config_antenna_5x(rt2x00dev, ant);
- else if (rt2x00_rf(&rt2x00dev->chip, RF2528) ||
- rt2x00_rf(&rt2x00dev->chip, RF2527))
+ else if (rt2x00_rf(rt2x00dev, RF2528) || rt2x00_rf(rt2x00dev, RF2527))
rt73usb_config_antenna_2x(rt2x00dev, ant);
}
@@ -779,8 +777,7 @@ static void rt73usb_config_channel(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
- smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF2527));
+ smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527));
rt73usb_bbp_read(rt2x00dev, 3, &r3);
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
@@ -1210,8 +1207,7 @@ static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00usb_register_write(rt2x00dev, SEC_CSR5, 0x00000000);
reg = 0x000023b0;
- if (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF2527))
+ if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527))
rt2x00_set_field32(&reg, PHY_CSR1_RF_RPI, 1);
rt2x00usb_register_write(rt2x00dev, PHY_CSR1, reg);
@@ -1827,16 +1823,16 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_chip(rt2x00dev, RT2571, value, reg);
rt2x00_print_chip(rt2x00dev);
- if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0x25730) ||
- rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) {
+ if (!rt2x00_check_rev(rt2x00dev, 0x000ffff0, 0x25730) ||
+ rt2x00_check_rev(rt2x00dev, 0x0000000f, 0)) {
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
return -ENODEV;
}
- if (!rt2x00_rf(&rt2x00dev->chip, RF5226) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2528) &&
- !rt2x00_rf(&rt2x00dev->chip, RF5225) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2527)) {
+ if (!rt2x00_rf(rt2x00dev, RF5226) &&
+ !rt2x00_rf(rt2x00dev, RF2528) &&
+ !rt2x00_rf(rt2x00dev, RF5225) &&
+ !rt2x00_rf(rt2x00dev, RF2527)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -2081,17 +2077,17 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (rt2x00_rf(&rt2x00dev->chip, RF2528)) {
+ if (rt2x00_rf(rt2x00dev, RF2528)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528);
spec->channels = rf_vals_bg_2528;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF5226)) {
+ } else if (rt2x00_rf(rt2x00dev, RF5226)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_5226);
spec->channels = rf_vals_5226;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2527)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2527)) {
spec->num_channels = 14;
spec->channels = rf_vals_5225_2527;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF5225)) {
+ } else if (rt2x00_rf(rt2x00dev, RF5225)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_5225_2527);
spec->channels = rf_vals_5225_2527;
@@ -2249,7 +2245,6 @@ static const struct ieee80211_ops rt73usb_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt73usb_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.get_tsf = rt73usb_get_tsf,
.rfkill_poll = rt2x00mac_rfkill_poll,
};
@@ -2354,6 +2349,7 @@ static struct usb_device_id rt73usb_device_table[] = {
{ USB_DEVICE(0x08dd, 0x0120), USB_DEVICE_DATA(&rt73usb_ops) },
/* Buffalo */
{ USB_DEVICE(0x0411, 0x00d8), USB_DEVICE_DATA(&rt73usb_ops) },
+ { USB_DEVICE(0x0411, 0x00d9), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0411, 0x0116), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0411, 0x0119), USB_DEVICE_DATA(&rt73usb_ops) },
diff --git a/drivers/net/wireless/rtl818x/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180.h
index 8721282a8185..de3844fe06d8 100644
--- a/drivers/net/wireless/rtl818x/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180.h
@@ -60,7 +60,6 @@ struct rtl8180_priv {
struct rtl818x_csr __iomem *map;
const struct rtl818x_rf_ops *rf;
struct ieee80211_vif *vif;
- int mode;
/* rtl8180 driver specific */
spinlock_t lock;
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 8a40a1439984..2b928ecf47bd 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -33,7 +33,7 @@ MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver");
MODULE_LICENSE("GPL");
-static struct pci_device_id rtl8180_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(rtl8180_table) = {
/* rtl8185 */
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8185) },
{ PCI_DEVICE(PCI_VENDOR_ID_BELKIN, 0x700f) },
@@ -82,8 +82,6 @@ static const struct ieee80211_channel rtl818x_channels[] = {
};
-
-
void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
{
struct rtl8180_priv *priv = dev->priv;
@@ -615,7 +613,6 @@ static int rtl8180_start(struct ieee80211_hw *dev)
reg |= RTL818X_CMD_TX_ENABLE;
rtl818x_iowrite8(priv, &priv->map->CMD, reg);
- priv->mode = NL80211_IFTYPE_MONITOR;
return 0;
err_free_rings:
@@ -633,8 +630,6 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
u8 reg;
int i;
- priv->mode = NL80211_IFTYPE_UNSPECIFIED;
-
rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
reg = rtl818x_ioread8(priv, &priv->map->CMD);
@@ -657,38 +652,39 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
}
static int rtl8180_add_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rtl8180_priv *priv = dev->priv;
- if (priv->mode != NL80211_IFTYPE_MONITOR)
- return -EOPNOTSUPP;
+ /*
+ * We only support one active interface at a time.
+ */
+ if (priv->vif)
+ return -EBUSY;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
- priv->mode = conf->type;
break;
default:
return -EOPNOTSUPP;
}
- priv->vif = conf->vif;
+ priv->vif = vif;
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->MAC[0],
- le32_to_cpu(*(__le32 *)conf->mac_addr));
+ le32_to_cpu(*(__le32 *)vif->addr));
rtl818x_iowrite16(priv, (__le16 __iomem *)&priv->map->MAC[4],
- le16_to_cpu(*(__le16 *)(conf->mac_addr + 4)));
+ le16_to_cpu(*(__le16 *)(vif->addr + 4)));
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
return 0;
}
static void rtl8180_remove_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rtl8180_priv *priv = dev->priv;
- priv->mode = NL80211_IFTYPE_MONITOR;
priv->vif = NULL;
}
@@ -765,6 +761,14 @@ static void rtl8180_configure_filter(struct ieee80211_hw *dev,
rtl818x_iowrite32(priv, &priv->map->RX_CONF, priv->rx_conf);
}
+static u64 rtl8180_get_tsf(struct ieee80211_hw *dev)
+{
+ struct rtl8180_priv *priv = dev->priv;
+
+ return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
+ (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
+}
+
static const struct ieee80211_ops rtl8180_ops = {
.tx = rtl8180_tx,
.start = rtl8180_start,
@@ -775,6 +779,7 @@ static const struct ieee80211_ops rtl8180_ops = {
.bss_info_changed = rtl8180_bss_info_changed,
.prepare_multicast = rtl8180_prepare_multicast,
.configure_filter = rtl8180_configure_filter,
+ .get_tsf = rtl8180_get_tsf,
};
static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom)
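
Both rtl8180 and rtl8187 gain a get_tsf callback that assembles the 64-bit TSF from the two 32-bit TSFT registers, low word first. A worked example of the composition:

/*
 * TSFT[0] = 0x89abcdef (low word), TSFT[1] = 0x00000001 (high word):
 *
 *   tsf = 0x89abcdef | ((u64)0x00000001 << 32) = 0x0000000189abcdef
 */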
diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h
index 6af0f3f71f3a..6bb32112e65c 100644
--- a/drivers/net/wireless/rtl818x/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187.h
@@ -92,7 +92,7 @@ struct rtl8187_priv {
struct rtl818x_csr *map;
const struct rtl818x_rf_ops *rf;
struct ieee80211_vif *vif;
- int mode;
+
/* The mutex protects the TX loopback state.
* Any attempt to set channels concurrently locks the device.
*/
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 7ba3052b0708..0fb850e0c656 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -1019,31 +1019,30 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
}
static int rtl8187_add_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rtl8187_priv *priv = dev->priv;
int i;
int ret = -EOPNOTSUPP;
mutex_lock(&priv->conf_mutex);
- if (priv->mode != NL80211_IFTYPE_MONITOR)
+ if (priv->vif)
goto exit;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
- priv->mode = conf->type;
break;
default:
goto exit;
}
ret = 0;
- priv->vif = conf->vif;
+ priv->vif = vif;
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
for (i = 0; i < ETH_ALEN; i++)
rtl818x_iowrite8(priv, &priv->map->MAC[i],
- ((u8 *)conf->mac_addr)[i]);
+ ((u8 *)vif->addr)[i]);
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
exit:
@@ -1052,11 +1051,10 @@ exit:
}
static void rtl8187_remove_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rtl8187_priv *priv = dev->priv;
mutex_lock(&priv->conf_mutex);
- priv->mode = NL80211_IFTYPE_MONITOR;
priv->vif = NULL;
mutex_unlock(&priv->conf_mutex);
}
@@ -1268,6 +1266,14 @@ static int rtl8187_conf_tx(struct ieee80211_hw *dev, u16 queue,
return 0;
}
+static u64 rtl8187_get_tsf(struct ieee80211_hw *dev)
+{
+ struct rtl8187_priv *priv = dev->priv;
+
+ return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
+ (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
+}
+
static const struct ieee80211_ops rtl8187_ops = {
.tx = rtl8187_tx,
.start = rtl8187_start,
@@ -1279,7 +1285,8 @@ static const struct ieee80211_ops rtl8187_ops = {
.prepare_multicast = rtl8187_prepare_multicast,
.configure_filter = rtl8187_configure_filter,
.conf_tx = rtl8187_conf_tx,
- .rfkill_poll = rtl8187_rfkill_poll
+ .rfkill_poll = rtl8187_rfkill_poll,
+ .get_tsf = rtl8187_get_tsf,
};
static void rtl8187_eeprom_register_read(struct eeprom_93cx6 *eeprom)
@@ -1366,7 +1373,6 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
- priv->mode = NL80211_IFTYPE_MONITOR;
dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_RX_INCLUDES_FCS;
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.c b/drivers/net/wireless/rtl818x/rtl8187_leds.c
index ded44c045eb2..f82aa8b4bdde 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_leds.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_leds.c
@@ -33,7 +33,7 @@ static void led_turn_on(struct work_struct *work)
struct rtl8187_led *led = &priv->led_tx;
/* Don't change the LED, when the device is down. */
- if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
+ if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
return ;
/* Skip if the LED is not registered. */
@@ -71,7 +71,7 @@ static void led_turn_off(struct work_struct *work)
struct rtl8187_led *led = &priv->led_tx;
/* Don't change the LED, when the device is down. */
- if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
+ if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
return ;
/* Skip if the LED is not registered. */
diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h
index 054533f7a124..37c61c19cae5 100644
--- a/drivers/net/wireless/wl12xx/wl1251.h
+++ b/drivers/net/wireless/wl12xx/wl1251.h
@@ -247,6 +247,7 @@ struct wl1251_debugfs {
struct dentry *rxpipe_tx_xfr_host_int_trig_rx_data;
struct dentry *tx_queue_len;
+ struct dentry *tx_queue_status;
struct dentry *retry_count;
struct dentry *excessive_retries;
@@ -340,9 +341,6 @@ struct wl1251 {
/* Are we currently scanning */
bool scanning;
- /* Our association ID */
- u16 aid;
-
/* Default key (for WEP) */
u32 default_key;
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.c b/drivers/net/wireless/wl12xx/wl1251_acx.c
index acfa086dbfc5..beff084040b5 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.c
@@ -976,3 +976,72 @@ out:
kfree(acx);
return ret;
}
+
+int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
+ u8 aifs, u16 txop)
+{
+ struct wl1251_acx_ac_cfg *acx;
+ int ret = 0;
+
+ wl1251_debug(DEBUG_ACX, "acx ac cfg %d cw_min %d cw_max %d "
+ "aifs %d txop %d", ac, cw_min, cw_max, aifs, txop);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->ac = ac;
+ acx->cw_min = cw_min;
+ acx->cw_max = cw_max;
+ acx->aifsn = aifs;
+ acx->txop_limit = txop;
+
+ ret = wl1251_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1251_warning("acx ac cfg failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
+ enum wl1251_acx_channel_type type,
+ u8 tsid, enum wl1251_acx_ps_scheme ps_scheme,
+ enum wl1251_acx_ack_policy ack_policy)
+{
+ struct wl1251_acx_tid_cfg *acx;
+ int ret = 0;
+
+ wl1251_debug(DEBUG_ACX, "acx tid cfg %d type %d tsid %d "
+ "ps_scheme %d ack_policy %d", queue, type, tsid,
+ ps_scheme, ack_policy);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->queue = queue;
+ acx->type = type;
+ acx->tsid = tsid;
+ acx->ps_scheme = ps_scheme;
+ acx->ack_policy = ack_policy;
+
+ ret = wl1251_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1251_warning("acx tid cfg failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
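
The new wl1251_acx_ac_cfg() maps directly onto the WMM parameters mac80211 hands to a conf_tx handler. A hedged sketch of such a caller (the assumption that the mac80211 queue number can be used as the AC index is mine, not taken from this patch):

/* Sketch only: forward WMM parameters for one TX queue to the firmware. */
static int example_conf_tx(struct wl1251 *wl, u16 queue,
			   const struct ieee80211_tx_queue_params *params)
{
	return wl1251_acx_ac_cfg(wl, queue,	/* assumed: queue == AC index */
				 params->cw_min, params->cw_max,
				 params->aifs, params->txop);
}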
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.h b/drivers/net/wireless/wl12xx/wl1251_acx.h
index 652371432cd8..26160c45784c 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.h
@@ -1166,6 +1166,87 @@ struct wl1251_acx_wr_tbtt_and_dtim {
u8 padding;
} __attribute__ ((packed));
+struct wl1251_acx_ac_cfg {
+ struct acx_header header;
+
+ /*
+ * Access Category - The TX queue's access category
+ * (refer to AccessCategory_enum)
+ */
+ u8 ac;
+
+ /*
+ * The contention window minimum size (in slots) for
+ * the access class.
+ */
+ u8 cw_min;
+
+ /*
+ * The contention window maximum size (in slots) for
+ * the access class.
+ */
+ u16 cw_max;
+
+ /* The AIF value (in slots) for the access class. */
+ u8 aifsn;
+
+ u8 reserved;
+
+ /* The TX Op Limit (in microseconds) for the access class. */
+ u16 txop_limit;
+} __attribute__ ((packed));
+
+
+enum wl1251_acx_channel_type {
+ CHANNEL_TYPE_DCF = 0,
+ CHANNEL_TYPE_EDCF = 1,
+ CHANNEL_TYPE_HCCA = 2,
+};
+
+enum wl1251_acx_ps_scheme {
+ /* regular ps: simple sending of packets */
+ WL1251_ACX_PS_SCHEME_LEGACY = 0,
+
+ /* sending a packet triggers an unscheduled apsd downstream */
+ WL1251_ACX_PS_SCHEME_UPSD_TRIGGER = 1,
+
+ /* a pspoll packet will be sent before every data packet */
+ WL1251_ACX_PS_SCHEME_LEGACY_PSPOLL = 2,
+
+ /* scheduled apsd mode */
+ WL1251_ACX_PS_SCHEME_SAPSD = 3,
+};
+
+enum wl1251_acx_ack_policy {
+ WL1251_ACX_ACK_POLICY_LEGACY = 0,
+ WL1251_ACX_ACK_POLICY_NO_ACK = 1,
+ WL1251_ACX_ACK_POLICY_BLOCK = 2,
+};
+
+struct wl1251_acx_tid_cfg {
+ struct acx_header header;
+
+ /* tx queue id number (0-7) */
+ u8 queue;
+
+ /* channel access type for the queue, enum wl1251_acx_channel_type */
+ u8 type;
+
+ /* EDCA: ac index (0-3), HCCA: traffic stream id (8-15) */
+ u8 tsid;
+
+ /* ps scheme of the specified queue, enum wl1251_acx_ps_scheme */
+ u8 ps_scheme;
+
+ /* the tx queue ack policy, enum wl1251_acx_ack_policy */
+ u8 ack_policy;
+
+ u8 padding[3];
+
+ /* not supported */
+ u32 apsdconf[2];
+} __attribute__ ((packed));
+
/*************************************************************************
Host Interrupt Register (WiLink -> Host)
@@ -1322,5 +1403,11 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime);
int wl1251_acx_rate_policies(struct wl1251 *wl);
int wl1251_acx_mem_cfg(struct wl1251 *wl);
int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
+int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
+ u8 aifs, u16 txop);
+int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
+ enum wl1251_acx_channel_type type,
+ u8 tsid, enum wl1251_acx_ps_scheme ps_scheme,
+ enum wl1251_acx_ack_policy ack_policy);
#endif /* __WL1251_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c
index 770f260726bd..0320b478bb3f 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c
@@ -410,3 +410,86 @@ out:
kfree(cmd);
return ret;
}
+
+int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
+ struct ieee80211_channel *channels[],
+ unsigned int n_channels, unsigned int n_probes)
+{
+ struct wl1251_cmd_scan *cmd;
+ int i, ret = 0;
+
+ wl1251_debug(DEBUG_CMD, "cmd scan");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
+ cmd->params.rx_filter_options = cpu_to_le32(CFG_RX_PRSP_EN |
+ CFG_RX_MGMT_EN |
+ CFG_RX_BCN_EN);
+ cmd->params.scan_options = 0;
+ cmd->params.num_channels = n_channels;
+ cmd->params.num_probe_requests = n_probes;
+ cmd->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
+ cmd->params.tid_trigger = 0;
+
+ for (i = 0; i < n_channels; i++) {
+ cmd->channels[i].min_duration =
+ cpu_to_le32(WL1251_SCAN_MIN_DURATION);
+ cmd->channels[i].max_duration =
+ cpu_to_le32(WL1251_SCAN_MAX_DURATION);
+ memset(&cmd->channels[i].bssid_lsb, 0xff, 4);
+ memset(&cmd->channels[i].bssid_msb, 0xff, 2);
+ cmd->channels[i].early_termination = 0;
+ cmd->channels[i].tx_power_att = 0;
+ cmd->channels[i].channel = channels[i]->hw_value;
+ }
+
+ cmd->params.ssid_len = ssid_len;
+ if (ssid)
+ memcpy(cmd->params.ssid, ssid, ssid_len);
+
+ ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
+ if (ret < 0) {
+ wl1251_error("cmd scan failed: %d", ret);
+ goto out;
+ }
+
+ wl1251_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd));
+
+ if (cmd->header.status != CMD_STATUS_SUCCESS) {
+ wl1251_error("cmd scan status wasn't success: %d",
+ cmd->header.status);
+ ret = -EIO;
+ goto out;
+ }
+
+out:
+ kfree(cmd);
+ return ret;
+}
+
+int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout)
+{
+ struct wl1251_cmd_trigger_scan_to *cmd;
+ int ret;
+
+ wl1251_debug(DEBUG_CMD, "cmd trigger scan to");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->timeout = timeout;
+
+ ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, cmd, sizeof(*cmd));
+ if (ret < 0) {
+ wl1251_error("cmd trigger scan to failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(cmd);
+ return ret;
+}
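Taken together with the mac80211 hw_scan hook further down in this patch, the intended call order is: set the probe request template, arm the trigger-scan timeout, then issue the scan itself. A hedged sketch of that ordering, using only the functions introduced above (template setup, ELP wakeup and error handling trimmed):

    /* illustrative ordering only; the real hw_scan path also sets the
     * probe request template and wakes the chip from ELP first */
    static int wl1251_example_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
                                   struct ieee80211_channel *channels[],
                                   unsigned int n_channels)
    {
            int ret;

            ret = wl1251_cmd_trigger_scan_to(wl, 0);        /* no timeout */
            if (ret < 0)
                    return ret;

            return wl1251_cmd_scan(wl, ssid, ssid_len, channels, n_channels,
                                   WL1251_SCAN_NUM_PROBES);
    }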
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.h b/drivers/net/wireless/wl12xx/wl1251_cmd.h
index dff798ad0ef5..4ad67cae94d2 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.h
@@ -27,6 +27,8 @@
#include "wl1251.h"
+#include <net/cfg80211.h>
+
struct acx_header;
int wl1251_cmd_send(struct wl1251 *wl, u16 type, void *buf, size_t buf_len);
@@ -43,6 +45,10 @@ int wl1251_cmd_read_memory(struct wl1251 *wl, u32 addr, void *answer,
size_t len);
int wl1251_cmd_template_set(struct wl1251 *wl, u16 cmd_id,
void *buf, size_t buf_len);
+int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
+ struct ieee80211_channel *channels[],
+ unsigned int n_channels, unsigned int n_probes);
+int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout);
/* unit ms */
#define WL1251_COMMAND_TIMEOUT 2000
@@ -163,8 +169,12 @@ struct cmd_read_write_memory {
#define CMDMBOX_HEADER_LEN 4
#define CMDMBOX_INFO_ELEM_HEADER_LEN 4
+#define WL1251_SCAN_MIN_DURATION 30000
+#define WL1251_SCAN_MAX_DURATION 60000
+
+#define WL1251_SCAN_NUM_PROBES 3
-struct basic_scan_parameters {
+struct wl1251_scan_parameters {
u32 rx_config_options;
u32 rx_filter_options;
@@ -189,11 +199,11 @@ struct basic_scan_parameters {
u8 tid_trigger;
u8 ssid_len;
- u32 ssid[8];
+ u8 ssid[32];
} __attribute__ ((packed));
-struct basic_scan_channel_parameters {
+struct wl1251_scan_ch_parameters {
u32 min_duration; /* in TU */
u32 max_duration; /* in TU */
u32 bssid_lsb;
@@ -213,11 +223,11 @@ struct basic_scan_channel_parameters {
/* SCAN parameters */
#define SCAN_MAX_NUM_OF_CHANNELS 16
-struct cmd_scan {
+struct wl1251_cmd_scan {
struct wl1251_cmd_header header;
- struct basic_scan_parameters params;
- struct basic_scan_channel_parameters channels[SCAN_MAX_NUM_OF_CHANNELS];
+ struct wl1251_scan_parameters params;
+ struct wl1251_scan_ch_parameters channels[SCAN_MAX_NUM_OF_CHANNELS];
} __attribute__ ((packed));
enum {
diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.c b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
index a00723059f83..0ccba57fb9fb 100644
--- a/drivers/net/wireless/wl12xx/wl1251_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
@@ -237,6 +237,27 @@ static const struct file_operations tx_queue_len_ops = {
.open = wl1251_open_file_generic,
};
+static ssize_t tx_queue_status_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1251 *wl = file->private_data;
+ char buf[3], status;
+ int len;
+
+ if (wl->tx_queue_stopped)
+ status = 's';
+ else
+ status = 'r';
+
+ len = scnprintf(buf, sizeof(buf), "%c\n", status);
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations tx_queue_status_ops = {
+ .read = tx_queue_status_read,
+ .open = wl1251_open_file_generic,
+};
+
static void wl1251_debugfs_delete_files(struct wl1251 *wl)
{
DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow);
@@ -331,6 +352,7 @@ static void wl1251_debugfs_delete_files(struct wl1251 *wl)
DEBUGFS_FWSTATS_DEL(rxpipe, tx_xfr_host_int_trig_rx_data);
DEBUGFS_DEL(tx_queue_len);
+ DEBUGFS_DEL(tx_queue_status);
DEBUGFS_DEL(retry_count);
DEBUGFS_DEL(excessive_retries);
}
@@ -431,6 +453,7 @@ static int wl1251_debugfs_add_files(struct wl1251 *wl)
DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
DEBUGFS_ADD(tx_queue_len, wl->debugfs.rootdir);
+ DEBUGFS_ADD(tx_queue_status, wl->debugfs.rootdir);
DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.c b/drivers/net/wireless/wl12xx/wl1251_init.c
index 5cb573383eeb..5aad56ea7153 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.c
+++ b/drivers/net/wireless/wl12xx/wl1251_init.c
@@ -294,6 +294,11 @@ static int wl1251_hw_init_tx_queue_config(struct wl1251 *wl)
goto out;
}
+ wl1251_acx_ac_cfg(wl, AC_BE, CWMIN_BE, CWMAX_BE, AIFS_DIFS, TXOP_BE);
+ wl1251_acx_ac_cfg(wl, AC_BK, CWMIN_BK, CWMAX_BK, AIFS_DIFS, TXOP_BK);
+ wl1251_acx_ac_cfg(wl, AC_VI, CWMIN_VI, CWMAX_VI, AIFS_DIFS, TXOP_VI);
+ wl1251_acx_ac_cfg(wl, AC_VO, CWMIN_VO, CWMAX_VO, AIFS_DIFS, TXOP_VO);
+
out:
kfree(config);
return ret;
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.h b/drivers/net/wireless/wl12xx/wl1251_init.h
index b3b25ec885ea..269cefb3e7d4 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.h
+++ b/drivers/net/wireless/wl12xx/wl1251_init.h
@@ -26,6 +26,53 @@
#include "wl1251.h"
+enum {
+ /* best effort/legacy */
+ AC_BE = 0,
+
+ /* background */
+ AC_BK = 1,
+
+ /* video */
+ AC_VI = 2,
+
+ /* voice */
+ AC_VO = 3,
+
+ /* broadcast dummy access category */
+ AC_BCAST = 4,
+
+ NUM_ACCESS_CATEGORIES = 4
+};
+
+/* following are default values for the IE fields */
+#define CWMIN_BK 15
+#define CWMIN_BE 15
+#define CWMIN_VI 7
+#define CWMIN_VO 3
+#define CWMAX_BK 1023
+#define CWMAX_BE 63
+#define CWMAX_VI 15
+#define CWMAX_VO 7
+
+/* slot number setting to start transmission at PIFS interval */
+#define AIFS_PIFS 1
+
+/*
+ * slot number setting to start transmission at DIFS interval - normal DCF
+ * access
+ */
+#define AIFS_DIFS 2
+
+#define AIFSN_BK 7
+#define AIFSN_BE 3
+#define AIFSN_VI AIFS_PIFS
+#define AIFSN_VO AIFS_PIFS
+#define TXOP_BK 0
+#define TXOP_BE 0
+#define TXOP_VI 3008
+#define TXOP_VO 1504
+
int wl1251_hw_init_hwenc_config(struct wl1251 *wl);
int wl1251_hw_init_templates_config(struct wl1251 *wl);
int wl1251_hw_init_rx_config(struct wl1251 *wl, u32 config, u32 filter);
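One unit detail worth keeping in mind: the TXOP_* defaults above are in microseconds (3008 us for video, 1504 us for voice), while mac80211 reports a queue's txop in units of 32 us. The conf_tx handler added later in this patch therefore multiplies by 32 before passing the value to wl1251_acx_ac_cfg(). A worked example of the conversion, with the input value assumed from the standard EDCA defaults rather than taken from this patch:

    /* mac80211 txop is in 32 us units, the firmware wants microseconds */
    u16 txop_units = 94;                    /* EDCA default for AC_VI     */
    u16 txop_usec  = txop_units * 32;       /* 94 * 32 = 3008 == TXOP_VI  */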
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 2f50a256efa5..24ae6a360ac8 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -395,6 +395,7 @@ static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* the queue here, otherwise the queue will get too long.
*/
if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_MAX_LENGTH) {
+ wl1251_debug(DEBUG_TX, "op_tx: tx_queue full, stop queues");
ieee80211_stop_queues(wl->hw);
/*
@@ -510,13 +511,13 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
}
static int wl1251_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct wl1251 *wl = hw->priv;
int ret = 0;
wl1251_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
- conf->type, conf->mac_addr);
+ vif->type, vif->addr);
mutex_lock(&wl->mutex);
if (wl->vif) {
@@ -524,9 +525,9 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
- wl->vif = conf->vif;
+ wl->vif = vif;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
wl->bss_type = BSS_TYPE_STA_BSS;
break;
@@ -538,8 +539,8 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
- if (memcmp(wl->mac_addr, conf->mac_addr, ETH_ALEN)) {
- memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN);
+ if (memcmp(wl->mac_addr, vif->addr, ETH_ALEN)) {
+ memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
ret = wl1251_acx_station_id(wl);
if (ret < 0)
@@ -552,7 +553,7 @@ out:
}
static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct wl1251 *wl = hw->priv;
@@ -562,43 +563,25 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&wl->mutex);
}
-static int wl1251_build_null_data(struct wl1251 *wl)
+static int wl1251_build_qos_null_data(struct wl1251 *wl)
{
- struct wl12xx_null_data_template template;
+ struct ieee80211_qos_hdr template;
- if (!is_zero_ether_addr(wl->bssid)) {
- memcpy(template.header.da, wl->bssid, ETH_ALEN);
- memcpy(template.header.bssid, wl->bssid, ETH_ALEN);
- } else {
- memset(template.header.da, 0xff, ETH_ALEN);
- memset(template.header.bssid, 0xff, ETH_ALEN);
- }
-
- memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
- template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
- IEEE80211_STYPE_NULLFUNC |
- IEEE80211_FCTL_TODS);
-
- return wl1251_cmd_template_set(wl, CMD_NULL_DATA, &template,
- sizeof(template));
-
-}
-
-static int wl1251_build_ps_poll(struct wl1251 *wl, u16 aid)
-{
- struct wl12xx_ps_poll_template template;
+ memset(&template, 0, sizeof(template));
- memcpy(template.bssid, wl->bssid, ETH_ALEN);
- memcpy(template.ta, wl->mac_addr, ETH_ALEN);
+ memcpy(template.addr1, wl->bssid, ETH_ALEN);
+ memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
+ memcpy(template.addr3, wl->bssid, ETH_ALEN);
- /* aid in PS-Poll has its two MSBs each set to 1 */
- template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid);
+ template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_NULLFUNC |
+ IEEE80211_FCTL_TODS);
- template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
+ /* FIXME: not sure what priority to use here */
+ template.qos_ctrl = cpu_to_le16(0);
- return wl1251_cmd_template_set(wl, CMD_PS_POLL, &template,
+ return wl1251_cmd_template_set(wl, CMD_QOS_NULL_DATA, &template,
sizeof(template));
-
}
static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
@@ -634,26 +617,34 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
wl->psm_requested = true;
+ wl->dtim_period = conf->ps_dtim_period;
+
+ ret = wl1251_acx_wr_tbtt_and_dtim(wl, wl->beacon_int,
+ wl->dtim_period);
+
/*
- * We enter PSM only if we're already associated.
- * If we're not, we'll enter it when joining an SSID,
- * through the bss_info_changed() hook.
+ * mac80211 enables PSM only if we're already associated.
*/
ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
+ if (ret < 0)
+ goto out_sleep;
} else if (!(conf->flags & IEEE80211_CONF_PS) &&
wl->psm_requested) {
wl1251_debug(DEBUG_PSM, "psm disabled");
wl->psm_requested = false;
- if (wl->psm)
+ if (wl->psm) {
ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
+ if (ret < 0)
+ goto out_sleep;
+ }
}
if (conf->power_level != wl->power_level) {
ret = wl1251_acx_tx_power(wl, conf->power_level);
if (ret < 0)
- goto out;
+ goto out_sleep;
wl->power_level = conf->power_level;
}
@@ -864,199 +855,61 @@ out:
return ret;
}
-static int wl1251_build_basic_rates(char *rates)
-{
- u8 index = 0;
-
- rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
- rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
- rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
- rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
-
- return index;
-}
-
-static int wl1251_build_extended_rates(char *rates)
+static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
+ struct cfg80211_scan_request *req)
{
- u8 index = 0;
-
- rates[index++] = IEEE80211_OFDM_RATE_6MB;
- rates[index++] = IEEE80211_OFDM_RATE_9MB;
- rates[index++] = IEEE80211_OFDM_RATE_12MB;
- rates[index++] = IEEE80211_OFDM_RATE_18MB;
- rates[index++] = IEEE80211_OFDM_RATE_24MB;
- rates[index++] = IEEE80211_OFDM_RATE_36MB;
- rates[index++] = IEEE80211_OFDM_RATE_48MB;
- rates[index++] = IEEE80211_OFDM_RATE_54MB;
-
- return index;
-}
-
+ struct wl1251 *wl = hw->priv;
+ struct sk_buff *skb;
+ size_t ssid_len = 0;
+ u8 *ssid = NULL;
+ int ret;
-static int wl1251_build_probe_req(struct wl1251 *wl, u8 *ssid, size_t ssid_len)
-{
- struct wl12xx_probe_req_template template;
- struct wl12xx_ie_rates *rates;
- char *ptr;
- u16 size;
-
- ptr = (char *)&template;
- size = sizeof(struct ieee80211_header);
-
- memset(template.header.da, 0xff, ETH_ALEN);
- memset(template.header.bssid, 0xff, ETH_ALEN);
- memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
- template.header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
-
- /* IEs */
- /* SSID */
- template.ssid.header.id = WLAN_EID_SSID;
- template.ssid.header.len = ssid_len;
- if (ssid_len && ssid)
- memcpy(template.ssid.ssid, ssid, ssid_len);
- size += sizeof(struct wl12xx_ie_header) + ssid_len;
- ptr += size;
-
- /* Basic Rates */
- rates = (struct wl12xx_ie_rates *)ptr;
- rates->header.id = WLAN_EID_SUPP_RATES;
- rates->header.len = wl1251_build_basic_rates(rates->rates);
- size += sizeof(struct wl12xx_ie_header) + rates->header.len;
- ptr += sizeof(struct wl12xx_ie_header) + rates->header.len;
-
- /* Extended rates */
- rates = (struct wl12xx_ie_rates *)ptr;
- rates->header.id = WLAN_EID_EXT_SUPP_RATES;
- rates->header.len = wl1251_build_extended_rates(rates->rates);
- size += sizeof(struct wl12xx_ie_header) + rates->header.len;
-
- wl1251_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size);
-
- return wl1251_cmd_template_set(wl, CMD_PROBE_REQ, &template,
- size);
-}
+ wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan");
-static int wl1251_hw_scan(struct wl1251 *wl, u8 *ssid, size_t len,
- u8 active_scan, u8 high_prio, u8 num_channels,
- u8 probe_requests)
-{
- struct wl1251_cmd_trigger_scan_to *trigger = NULL;
- struct cmd_scan *params = NULL;
- int i, ret;
- u16 scan_options = 0;
-
- if (wl->scanning)
- return -EINVAL;
-
- params = kzalloc(sizeof(*params), GFP_KERNEL);
- if (!params)
- return -ENOMEM;
-
- params->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
- params->params.rx_filter_options =
- cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | CFG_RX_BCN_EN);
-
- /* High priority scan */
- if (!active_scan)
- scan_options |= SCAN_PASSIVE;
- if (high_prio)
- scan_options |= SCAN_PRIORITY_HIGH;
- params->params.scan_options = scan_options;
-
- params->params.num_channels = num_channels;
- params->params.num_probe_requests = probe_requests;
- params->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
- params->params.tid_trigger = 0;
-
- for (i = 0; i < num_channels; i++) {
- params->channels[i].min_duration = cpu_to_le32(30000);
- params->channels[i].max_duration = cpu_to_le32(60000);
- memset(&params->channels[i].bssid_lsb, 0xff, 4);
- memset(&params->channels[i].bssid_msb, 0xff, 2);
- params->channels[i].early_termination = 0;
- params->channels[i].tx_power_att = 0;
- params->channels[i].channel = i + 1;
- memset(params->channels[i].pad, 0, 3);
+ if (req->n_ssids) {
+ ssid = req->ssids[0].ssid;
+ ssid_len = req->ssids[0].ssid_len;
}
- for (i = num_channels; i < SCAN_MAX_NUM_OF_CHANNELS; i++)
- memset(&params->channels[i], 0,
- sizeof(struct basic_scan_channel_parameters));
-
- if (len && ssid) {
- params->params.ssid_len = len;
- memcpy(params->params.ssid, ssid, len);
- } else {
- params->params.ssid_len = 0;
- memset(params->params.ssid, 0, 32);
- }
+ mutex_lock(&wl->mutex);
- ret = wl1251_build_probe_req(wl, ssid, len);
- if (ret < 0) {
- wl1251_error("PROBE request template failed");
+ if (wl->scanning) {
+ wl1251_debug(DEBUG_SCAN, "scan already in progress");
+ ret = -EINVAL;
goto out;
}
- trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
- if (!trigger)
+ ret = wl1251_ps_elp_wakeup(wl);
+ if (ret < 0)
goto out;
- trigger->timeout = 0;
-
- ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger,
- sizeof(*trigger));
- if (ret < 0) {
- wl1251_error("trigger scan to failed for hw scan");
+ skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
+ req->ie, req->ie_len);
+ if (!skb) {
+ ret = -ENOMEM;
goto out;
}
- wl1251_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params));
-
- wl->scanning = true;
+ ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, skb->data,
+ skb->len);
+ dev_kfree_skb(skb);
+ if (ret < 0)
+ goto out_sleep;
- ret = wl1251_cmd_send(wl, CMD_SCAN, params, sizeof(*params));
+ ret = wl1251_cmd_trigger_scan_to(wl, 0);
if (ret < 0)
- wl1251_error("SCAN failed");
+ goto out_sleep;
- wl1251_mem_read(wl, wl->cmd_box_addr, params, sizeof(*params));
+ wl->scanning = true;
- if (params->header.status != CMD_STATUS_SUCCESS) {
- wl1251_error("TEST command answer error: %d",
- params->header.status);
+ ret = wl1251_cmd_scan(wl, ssid, ssid_len, req->channels,
+ req->n_channels, WL1251_SCAN_NUM_PROBES);
+ if (ret < 0) {
wl->scanning = false;
- ret = -EIO;
- goto out;
- }
-
-out:
- kfree(params);
- return ret;
-
-}
-
-static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
- struct cfg80211_scan_request *req)
-{
- struct wl1251 *wl = hw->priv;
- int ret;
- u8 *ssid = NULL;
- size_t ssid_len = 0;
-
- wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan");
-
- if (req->n_ssids) {
- ssid = req->ssids[0].ssid;
- ssid_len = req->ssids[0].ssid_len;
+ goto out_sleep;
}
- mutex_lock(&wl->mutex);
-
- ret = wl1251_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
-
- ret = wl1251_hw_scan(hw->priv, ssid, ssid_len, 1, 0, 13, 3);
-
+out_sleep:
wl1251_ps_elp_sleep(wl);
out:
@@ -1093,9 +946,8 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
- enum wl1251_cmd_ps_mode mode;
struct wl1251 *wl = hw->priv;
- struct sk_buff *beacon;
+ struct sk_buff *beacon, *skb;
int ret;
wl1251_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -1109,7 +961,17 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID) {
memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
- ret = wl1251_build_null_data(wl);
+ skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out_sleep;
+
+ ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA,
+ skb->data, skb->len);
+ dev_kfree_skb(skb);
+ if (ret < 0)
+ goto out_sleep;
+
+ ret = wl1251_build_qos_null_data(wl);
if (ret < 0)
goto out;
@@ -1124,27 +986,21 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
wl->beacon_int = bss_conf->beacon_int;
- wl->dtim_period = bss_conf->dtim_period;
- ret = wl1251_acx_wr_tbtt_and_dtim(wl, wl->beacon_int,
- wl->dtim_period);
- wl->aid = bss_conf->aid;
+ skb = ieee80211_pspoll_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out_sleep;
- ret = wl1251_build_ps_poll(wl, wl->aid);
+ ret = wl1251_cmd_template_set(wl, CMD_PS_POLL,
+ skb->data,
+ skb->len);
+ dev_kfree_skb(skb);
if (ret < 0)
goto out_sleep;
- ret = wl1251_acx_aid(wl, wl->aid);
+ ret = wl1251_acx_aid(wl, bss_conf->aid);
if (ret < 0)
goto out_sleep;
-
- /* If we want to go in PSM but we're not there yet */
- if (wl->psm_requested && !wl->psm) {
- mode = STATION_POWER_SAVE_MODE;
- ret = wl1251_ps_set_mode(wl, mode);
- if (ret < 0)
- goto out_sleep;
- }
} else {
/* use defaults when not associated */
wl->beacon_int = WL1251_DEFAULT_BEACON_INT;
@@ -1176,7 +1032,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
ret = wl1251_acx_cts_protect(wl, CTSPROTECT_DISABLE);
if (ret < 0) {
wl1251_warning("Set ctsprotect failed %d", ret);
- goto out;
+ goto out_sleep;
}
}
@@ -1187,7 +1043,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0) {
dev_kfree_skb(beacon);
- goto out;
+ goto out_sleep;
}
ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, beacon->data,
@@ -1196,13 +1052,13 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
dev_kfree_skb(beacon);
if (ret < 0)
- goto out;
+ goto out_sleep;
ret = wl1251_join(wl, wl->bss_type, wl->beacon_int,
wl->channel, wl->dtim_period);
if (ret < 0)
- goto out;
+ goto out_sleep;
}
out_sleep:
@@ -1273,6 +1129,49 @@ static struct ieee80211_channel wl1251_channels[] = {
{ .hw_value = 13, .center_freq = 2472},
};
+static int wl1251_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ enum wl1251_acx_ps_scheme ps_scheme;
+ struct wl1251 *wl = hw->priv;
+ int ret;
+
+ mutex_lock(&wl->mutex);
+
+ wl1251_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
+
+ ret = wl1251_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ /* mac80211 uses units of 32 usec */
+ ret = wl1251_acx_ac_cfg(wl, wl1251_tx_get_queue(queue),
+ params->cw_min, params->cw_max,
+ params->aifs, params->txop * 32);
+ if (ret < 0)
+ goto out_sleep;
+
+ if (params->uapsd)
+ ps_scheme = WL1251_ACX_PS_SCHEME_UPSD_TRIGGER;
+ else
+ ps_scheme = WL1251_ACX_PS_SCHEME_LEGACY;
+
+ ret = wl1251_acx_tid_cfg(wl, wl1251_tx_get_queue(queue),
+ CHANNEL_TYPE_EDCF,
+ wl1251_tx_get_queue(queue), ps_scheme,
+ WL1251_ACX_ACK_POLICY_LEGACY);
+ if (ret < 0)
+ goto out_sleep;
+
+out_sleep:
+ wl1251_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+
+ return ret;
+}
+
/* can't be const, mac80211 writes to this */
static struct ieee80211_supported_band wl1251_band_2ghz = {
.channels = wl1251_channels,
@@ -1293,6 +1192,7 @@ static const struct ieee80211_ops wl1251_ops = {
.hw_scan = wl1251_op_hw_scan,
.bss_info_changed = wl1251_op_bss_info_changed,
.set_rts_threshold = wl1251_op_set_rts_threshold,
+ .conf_tx = wl1251_op_conf_tx,
};
static int wl1251_register_hw(struct wl1251 *wl)
@@ -1332,12 +1232,15 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_NOISE_DBM |
IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_BEACON_FILTER;
+ IEEE80211_HW_BEACON_FILTER |
+ IEEE80211_HW_SUPPORTS_UAPSD;
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
wl->hw->wiphy->max_scan_ssids = 1;
wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
+ wl->hw->queues = 4;
+
ret = wl1251_register_hw(wl);
if (ret)
goto out;
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.c b/drivers/net/wireless/wl12xx/wl1251_ps.c
index 9931b197ff77..851dfb65e474 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1251_ps.c
@@ -26,7 +26,8 @@
#include "wl1251_cmd.h"
#include "wl1251_io.h"
-#define WL1251_WAKEUP_TIMEOUT 2000
+/* in ms */
+#define WL1251_WAKEUP_TIMEOUT 100
void wl1251_elp_work(struct work_struct *work)
{
@@ -67,7 +68,7 @@ void wl1251_ps_elp_sleep(struct wl1251 *wl)
int wl1251_ps_elp_wakeup(struct wl1251 *wl)
{
- unsigned long timeout;
+ unsigned long timeout, start;
u32 elp_reg;
if (!wl->elp)
@@ -75,6 +76,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
wl1251_debug(DEBUG_PSM, "waking up chip from elp");
+ start = jiffies;
timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT);
wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
@@ -95,8 +97,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
}
wl1251_debug(DEBUG_PSM, "wakeup time: %u ms",
- jiffies_to_msecs(jiffies) -
- (jiffies_to_msecs(timeout) - WL1251_WAKEUP_TIMEOUT));
+ jiffies_to_msecs(jiffies - start));
wl->elp = false;
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
index f84cc89cbffc..b56732226cc0 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.c
@@ -126,7 +126,7 @@ static void wl1251_rx_body(struct wl1251 *wl,
if (wl->rx_current_buffer)
rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size;
- skb = dev_alloc_skb(length);
+ skb = __dev_alloc_skb(length, GFP_KERNEL);
if (!skb) {
wl1251_error("Couldn't allocate RX frame");
return;
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.c b/drivers/net/wireless/wl12xx/wl1251_tx.c
index f85970615849..c8223185efd2 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.c
@@ -167,8 +167,7 @@ static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb,
tx_hdr->expiry_time = cpu_to_le32(1 << 16);
tx_hdr->id = id;
- /* FIXME: how to get the correct queue id? */
- tx_hdr->xmit_queue = 0;
+ tx_hdr->xmit_queue = wl1251_tx_get_queue(skb_get_queue_mapping(skb));
wl1251_tx_control(tx_hdr, control, fc);
wl1251_tx_frag_block_num(tx_hdr);
@@ -220,6 +219,7 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
/* align the buffer on a 4-byte boundary */
skb_reserve(skb, offset);
memmove(skb->data, src, skb->len);
+ tx_hdr = (struct tx_double_buffer_desc *) skb->data;
} else {
wl1251_info("No handler, fixme!");
return -EINVAL;
@@ -237,8 +237,9 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
wl1251_mem_write(wl, addr, skb->data, len);
- wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x",
- tx_hdr->id, skb, tx_hdr->length, tx_hdr->rate);
+ wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x "
+ "queue %d", tx_hdr->id, skb, tx_hdr->length,
+ tx_hdr->rate, tx_hdr->xmit_queue);
return 0;
}
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.h b/drivers/net/wireless/wl12xx/wl1251_tx.h
index 7c1c1665c810..55856c6bb97a 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.h
@@ -26,6 +26,7 @@
#define __WL1251_TX_H__
#include <linux/bitops.h>
+#include "wl1251_acx.h"
/*
*
@@ -209,6 +210,22 @@ struct tx_result {
u8 done_2;
} __attribute__ ((packed));
+static inline int wl1251_tx_get_queue(int queue)
+{
+ switch (queue) {
+ case 0:
+ return QOS_AC_VO;
+ case 1:
+ return QOS_AC_VI;
+ case 2:
+ return QOS_AC_BE;
+ case 3:
+ return QOS_AC_BK;
+ default:
+ return QOS_AC_BE;
+ }
+}
+
void wl1251_tx_work(struct work_struct *work);
void wl1251_tx_complete(struct wl1251 *wl);
void wl1251_tx_flush(struct wl1251 *wl);
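The wl1251_tx_get_queue() helper above translates mac80211's queue numbering, where queue 0 is the highest-priority voice queue and queue 3 is background, into the firmware's QOS_AC_* access categories, defaulting to best effort for anything unexpected. A small sketch of how it is used by the TX path and the conf_tx callback elsewhere in this patch:

    /* illustrative only: pick the firmware access category for a frame */
    static void wl1251_example_set_ac(struct sk_buff *skb,
                                      struct tx_double_buffer_desc *tx_hdr)
    {
            /* queue 0..3 -> QOS_AC_VO/VI/BE/BK, anything else -> BE */
            tx_hdr->xmit_queue = wl1251_tx_get_queue(skb_get_queue_mapping(skb));
    }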
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 94359b1a861f..d0938db043b3 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -107,10 +107,9 @@ enum {
CFG_RX_CTL_EN | CFG_RX_BCN_EN | \
CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
-#define WL1271_DEFAULT_BASIC_RATE_SET (CONF_TX_RATE_MASK_ALL)
-
#define WL1271_FW_NAME "wl1271-fw.bin"
#define WL1271_NVS_NAME "wl1271-nvs.bin"
+#define WL1271_NVS_LEN 468
/*
* Enable/disable 802.11a support for WL1273
@@ -276,6 +275,7 @@ struct wl1271_debugfs {
struct dentry *retry_count;
struct dentry *excessive_retries;
+ struct dentry *gpio_power;
};
#define NUM_TX_QUEUES 4
@@ -322,6 +322,17 @@ struct wl1271 {
enum wl1271_state state;
struct mutex mutex;
+#define WL1271_FLAG_STA_RATES_CHANGED (0)
+#define WL1271_FLAG_STA_ASSOCIATED (1)
+#define WL1271_FLAG_JOINED (2)
+#define WL1271_FLAG_GPIO_POWER (3)
+#define WL1271_FLAG_TX_QUEUE_STOPPED (4)
+#define WL1271_FLAG_SCANNING (5)
+#define WL1271_FLAG_IN_ELP (6)
+#define WL1271_FLAG_PSM (7)
+#define WL1271_FLAG_PSM_REQUESTED (8)
+ unsigned long flags;
+
struct wl1271_partition_set part;
struct wl1271_chip chip;
@@ -359,7 +370,6 @@ struct wl1271 {
/* Frames scheduled for transmission, not handled yet */
struct sk_buff_head tx_queue;
- bool tx_queue_stopped;
struct work_struct tx_work;
@@ -387,14 +397,15 @@ struct wl1271 {
u32 mbox_ptr[2];
/* Are we currently scanning */
- bool scanning;
struct wl1271_scan scan;
/* Our association ID */
u16 aid;
/* currently configured rate set */
+ u32 sta_rate_set;
u32 basic_rate_set;
+ u32 rate_set;
/* The current band */
enum ieee80211_band band;
@@ -405,18 +416,9 @@ struct wl1271 {
unsigned int rx_config;
unsigned int rx_filter;
- /* is firmware in elp mode */
- bool elp;
-
struct completion *elp_compl;
struct delayed_work elp_work;
- /* we can be in psm, but not in elp, we have to differentiate */
- bool psm;
-
- /* PSM mode requested */
- bool psm_requested;
-
/* retry counter for PSM entries */
u8 psm_entry_retry;
@@ -435,9 +437,6 @@ struct wl1271 {
struct ieee80211_vif *vif;
- /* Used for a workaround to send disconnect before rejoining */
- bool joined;
-
/* Current chipset configuration */
struct conf_drv_settings conf;
@@ -455,7 +454,9 @@ int wl1271_plt_stop(struct wl1271 *wl);
#define WL1271_TX_QUEUE_MAX_LENGTH 20
-/* WL1271 needs a 200ms sleep after power on */
+/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
+ on in case it has been shut down shortly before */
+#define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */
#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */
static inline bool wl1271_11a_enabled(void)
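On the wl1271 side, the struct changes above fold a set of independent bools (scanning, elp, psm, psm_requested, joined, tx_queue_stopped) into a single unsigned long flags word with WL1271_FLAG_* bit numbers, manipulated with the kernel's atomic bitops throughout the rest of the patch. A minimal sketch of the idiom, assuming only names visible in this diff:

    /* illustrative only: set, query and clear a driver state flag */
    static void wl1271_example_flags(struct wl1271 *wl)
    {
            set_bit(WL1271_FLAG_SCANNING, &wl->flags);

            if (test_bit(WL1271_FLAG_SCANNING, &wl->flags))
                    wl1271_debug(DEBUG_SCAN, "scan in progress");

            clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
    }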
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index 5cc89bbdac7a..0b3434843476 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -390,6 +390,35 @@ out:
return ret;
}
+int wl1271_acx_dco_itrim_params(struct wl1271 *wl)
+{
+ struct acx_dco_itrim_params *dco;
+ struct conf_itrim_settings *c = &wl->conf.itrim;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx dco itrim parameters");
+
+ dco = kzalloc(sizeof(*dco), GFP_KERNEL);
+ if (!dco) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dco->enable = c->enable;
+ dco->timeout = cpu_to_le32(c->timeout);
+
+ ret = wl1271_cmd_configure(wl, ACX_SET_DCO_ITRIM_PARAMS,
+ dco, sizeof(*dco));
+ if (ret < 0) {
+ wl1271_warning("failed to set dco itrim parameters: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(dco);
+ return ret;
+}
+
int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
{
struct acx_beacon_filter_option *beacon_filter = NULL;
@@ -758,10 +787,11 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
return 0;
}
-int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates)
+int wl1271_acx_rate_policies(struct wl1271 *wl)
{
struct acx_rate_policy *acx;
struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf;
+ int idx = 0;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx rate policies");
@@ -773,12 +803,21 @@ int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates)
goto out;
}
- /* configure one default (one-size-fits-all) rate class */
- acx->rate_class_cnt = cpu_to_le32(1);
- acx->rate_class[0].enabled_rates = cpu_to_le32(enabled_rates);
- acx->rate_class[0].short_retry_limit = c->short_retry_limit;
- acx->rate_class[0].long_retry_limit = c->long_retry_limit;
- acx->rate_class[0].aflags = c->aflags;
+ /* configure one basic rate class */
+ idx = ACX_TX_BASIC_RATE;
+ acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate_set);
+ acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
+ acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
+ acx->rate_class[idx].aflags = c->aflags;
+
+ /* configure one AP supported rate class */
+ idx = ACX_TX_AP_FULL_RATE;
+ acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->rate_set);
+ acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
+ acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
+ acx->rate_class[idx].aflags = c->aflags;
+
+ acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT);
ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
if (ret < 0) {
@@ -1012,59 +1051,6 @@ out:
return ret;
}
-int wl1271_acx_smart_reflex(struct wl1271 *wl)
-{
- struct acx_smart_reflex_state *sr_state = NULL;
- struct acx_smart_reflex_config_params *sr_param = NULL;
- int i, ret;
-
- wl1271_debug(DEBUG_ACX, "acx smart reflex");
-
- sr_param = kzalloc(sizeof(*sr_param), GFP_KERNEL);
- if (!sr_param) {
- ret = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < CONF_SR_ERR_TBL_COUNT; i++) {
- struct conf_mart_reflex_err_table *e =
- &(wl->conf.init.sr_err_tbl[i]);
-
- sr_param->error_table[i].len = e->len;
- sr_param->error_table[i].upper_limit = e->upper_limit;
- memcpy(sr_param->error_table[i].values, e->values, e->len);
- }
-
- ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_PARAMS,
- sr_param, sizeof(*sr_param));
- if (ret < 0) {
- wl1271_warning("failed to set smart reflex params: %d", ret);
- goto out;
- }
-
- sr_state = kzalloc(sizeof(*sr_state), GFP_KERNEL);
- if (!sr_state) {
- ret = -ENOMEM;
- goto out;
- }
-
- /* enable smart reflex */
- sr_state->enable = wl->conf.init.sr_enable;
-
- ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_STATE,
- sr_state, sizeof(*sr_state));
- if (ret < 0) {
- wl1271_warning("failed to set smart reflex params: %d", ret);
- goto out;
- }
-
-out:
- kfree(sr_state);
- kfree(sr_param);
- return ret;
-
-}
-
int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
{
struct wl1271_acx_bet_enable *acx = NULL;
@@ -1132,3 +1118,31 @@ out:
kfree(acx);
return ret;
}
+
+int wl1271_acx_pm_config(struct wl1271 *wl)
+{
+ struct wl1271_acx_pm_config *acx = NULL;
+ struct conf_pm_config_settings *c = &wl->conf.pm_config;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx pm config");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->host_clk_settling_time = cpu_to_le32(c->host_clk_settling_time);
+ acx->host_fast_wakeup_support = c->host_fast_wakeup_support;
+
+ ret = wl1271_cmd_configure(wl, ACX_PM_CONFIG, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx pm config failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index 2ce0a8128542..1bb63af64f0e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -415,23 +415,12 @@ struct acx_bt_wlan_coex {
u8 pad[3];
} __attribute__ ((packed));
-struct acx_smart_reflex_state {
+struct acx_dco_itrim_params {
struct acx_header header;
u8 enable;
u8 padding[3];
-} __attribute__ ((packed));
-
-struct smart_reflex_err_table {
- u8 len;
- s8 upper_limit;
- s8 values[14];
-} __attribute__ ((packed));
-
-struct acx_smart_reflex_config_params {
- struct acx_header header;
-
- struct smart_reflex_err_table error_table[3];
+ __le32 timeout;
} __attribute__ ((packed));
#define PTA_ANTENNA_TYPE_DEF (0)
@@ -837,6 +826,9 @@ struct acx_rate_class {
u8 reserved;
};
+#define ACX_TX_BASIC_RATE 0
+#define ACX_TX_AP_FULL_RATE 1
+#define ACX_TX_RATE_POLICY_CNT 2
struct acx_rate_policy {
struct acx_header header;
@@ -877,8 +869,8 @@ struct acx_tx_config_options {
__le16 tx_compl_threshold; /* number of packets */
} __attribute__ ((packed));
-#define ACX_RX_MEM_BLOCKS 64
-#define ACX_TX_MIN_MEM_BLOCKS 64
+#define ACX_RX_MEM_BLOCKS 70
+#define ACX_TX_MIN_MEM_BLOCKS 40
#define ACX_TX_DESCRIPTORS 32
#define ACX_NUM_SSID_PROFILES 1
@@ -969,6 +961,13 @@ struct wl1271_acx_arp_filter {
used. */
} __attribute__((packed));
+struct wl1271_acx_pm_config {
+ struct acx_header header;
+
+ __le32 host_clk_settling_time;
+ u8 host_fast_wakeup_support;
+ u8 padding[3];
+} __attribute__ ((packed));
enum {
ACX_WAKE_UP_CONDITIONS = 0x0002,
@@ -1027,13 +1026,13 @@ enum {
ACX_HT_BSS_OPERATION = 0x0058,
ACX_COEX_ACTIVITY = 0x0059,
ACX_SET_SMART_REFLEX_DEBUG = 0x005A,
- ACX_SET_SMART_REFLEX_STATE = 0x005B,
- ACX_SET_SMART_REFLEX_PARAMS = 0x005F,
+ ACX_SET_DCO_ITRIM_PARAMS = 0x0061,
DOT11_RX_MSDU_LIFE_TIME = 0x1004,
DOT11_CUR_TX_PWR = 0x100D,
DOT11_RX_DOT11_MODE = 0x1012,
DOT11_RTS_THRESHOLD = 0x1013,
DOT11_GROUP_ADDRESS_TBL = 0x1014,
+ ACX_PM_CONFIG = 0x1016,
MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL,
@@ -1056,6 +1055,7 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
void *mc_list, u32 mc_list_len);
int wl1271_acx_service_period_timeout(struct wl1271 *wl);
int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold);
+int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
int wl1271_acx_conn_monit_params(struct wl1271 *wl);
@@ -1069,7 +1069,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
int wl1271_acx_cts_protect(struct wl1271 *wl,
enum acx_ctsprotect_type ctsprotect);
int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
-int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates);
+int wl1271_acx_rate_policies(struct wl1271 *wl);
int wl1271_acx_ac_cfg(struct wl1271 *wl);
int wl1271_acx_tid_cfg(struct wl1271 *wl);
int wl1271_acx_frag_threshold(struct wl1271 *wl);
@@ -1081,5 +1081,6 @@ int wl1271_acx_smart_reflex(struct wl1271 *wl);
int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
u8 version);
+int wl1271_acx_pm_config(struct wl1271 *wl);
#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index b7c96454cca3..e803b876f3f0 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -225,9 +225,15 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
if (nvs == NULL)
return -ENODEV;
+ if (wl->nvs_len < WL1271_NVS_LEN)
+ return -EINVAL;
+
nvs_ptr = nvs;
- nvs_len = wl->nvs_len;
+ /* only the first part of the NVS needs to be uploaded */
+ nvs_len = WL1271_NVS_LEN;
+
+ /* FIXME: read init settings from the remaining part of the NVS */
/* Update the device MAC address into the nvs */
nvs[11] = wl->mac_addr[0];
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index c3385b3d246c..a74259bb596b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -209,6 +209,26 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
gen_parms->tx_bip_fem_manufacturer = g->tx_bip_fem_manufacturer;
gen_parms->settings = g->settings;
+ gen_parms->sr_state = g->sr_state;
+
+ memcpy(gen_parms->srf1,
+ g->srf1,
+ CONF_MAX_SMART_REFLEX_PARAMS);
+ memcpy(gen_parms->srf2,
+ g->srf2,
+ CONF_MAX_SMART_REFLEX_PARAMS);
+ memcpy(gen_parms->srf3,
+ g->srf3,
+ CONF_MAX_SMART_REFLEX_PARAMS);
+ memcpy(gen_parms->sr_debug_table,
+ g->sr_debug_table,
+ CONF_MAX_SMART_REFLEX_PARAMS);
+
+ gen_parms->sr_sen_n_p = g->sr_sen_n_p;
+ gen_parms->sr_sen_n_p_gain = g->sr_sen_n_p_gain;
+ gen_parms->sr_sen_nrn = g->sr_sen_nrn;
+ gen_parms->sr_sen_prn = g->sr_sen_prn;
+
ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0);
if (ret < 0)
wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
@@ -253,6 +273,8 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
CONF_NUMBER_OF_RATE_GROUPS);
memcpy(radio_parms->tx_rate_limits_degraded, r->tx_rate_limits_degraded,
CONF_NUMBER_OF_RATE_GROUPS);
+ memcpy(radio_parms->tx_rate_limits_extreme, r->tx_rate_limits_extreme,
+ CONF_NUMBER_OF_RATE_GROUPS);
memcpy(radio_parms->tx_channel_limits_11b, r->tx_channel_limits_11b,
CONF_NUMBER_OF_CHANNELS_2_4);
@@ -263,6 +285,11 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
memcpy(radio_parms->tx_ibias, r->tx_ibias, CONF_NUMBER_OF_RATE_GROUPS);
radio_parms->rx_fem_insertion_loss = r->rx_fem_insertion_loss;
+ radio_parms->degraded_low_to_normal_threshold =
+ r->degraded_low_to_normal_threshold;
+ radio_parms->degraded_normal_to_high_threshold =
+ r->degraded_normal_to_high_threshold;
+
for (i = 0; i < CONF_NUMBER_OF_SUB_BANDS_5; i++)
radio_parms->tx_ref_pd_voltage_5[i] =
@@ -275,6 +302,8 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
r->tx_rate_limits_normal_5, CONF_NUMBER_OF_RATE_GROUPS);
memcpy(radio_parms->tx_rate_limits_degraded_5,
r->tx_rate_limits_degraded_5, CONF_NUMBER_OF_RATE_GROUPS);
+ memcpy(radio_parms->tx_rate_limits_extreme_5,
+ r->tx_rate_limits_extreme_5, CONF_NUMBER_OF_RATE_GROUPS);
memcpy(radio_parms->tx_channel_limits_ofdm_5,
r->tx_channel_limits_ofdm_5, CONF_NUMBER_OF_CHANNELS_5);
memcpy(radio_parms->tx_pdv_rate_offsets_5, r->tx_pdv_rate_offsets_5,
@@ -283,6 +312,10 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
CONF_NUMBER_OF_RATE_GROUPS);
memcpy(radio_parms->rx_fem_insertion_loss_5,
r->rx_fem_insertion_loss_5, CONF_NUMBER_OF_SUB_BANDS_5);
+ radio_parms->degraded_low_to_normal_threshold_5 =
+ r->degraded_low_to_normal_threshold_5;
+ radio_parms->degraded_normal_to_high_threshold_5 =
+ r->degraded_normal_to_high_threshold_5;
wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
radio_parms, sizeof(*radio_parms));
@@ -311,19 +344,6 @@ int wl1271_cmd_join(struct wl1271 *wl)
do_cal = false;
}
- /* FIXME: This is a workaround, because with the current stack, we
- * cannot know when we have disassociated. So, if we have already
- * joined, we disconnect before joining again. */
- if (wl->joined) {
- ret = wl1271_cmd_disconnect(wl);
- if (ret < 0) {
- wl1271_error("failed to disconnect before rejoining");
- goto out;
- }
-
- wl->joined = false;
- }
-
join = kzalloc(sizeof(*join), GFP_KERNEL);
if (!join) {
ret = -ENOMEM;
@@ -388,8 +408,6 @@ int wl1271_cmd_join(struct wl1271 *wl)
goto out_free;
}
- wl->joined = true;
-
/*
* ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to
* simplify locking we just sleep instead, for now
@@ -487,7 +505,7 @@ int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
return 0;
}
-int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
+int wl1271_cmd_data_path(struct wl1271 *wl, bool enable)
{
struct cmd_enabledisable_path *cmd;
int ret;
@@ -501,7 +519,8 @@ int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
goto out;
}
- cmd->channel = channel;
+ /* the channel here is only used for calibration, so hardcoded to 1 */
+ cmd->channel = 1;
if (enable) {
cmd_rx = CMD_ENABLE_RX;
@@ -514,22 +533,22 @@ int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
ret = wl1271_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("rx %s cmd for channel %d failed",
- enable ? "start" : "stop", channel);
+ enable ? "start" : "stop", cmd->channel);
goto out;
}
wl1271_debug(DEBUG_BOOT, "rx %s cmd channel %d",
- enable ? "start" : "stop", channel);
+ enable ? "start" : "stop", cmd->channel);
ret = wl1271_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("tx %s cmd for channel %d failed",
- enable ? "start" : "stop", channel);
+ enable ? "start" : "stop", cmd->channel);
return ret;
}
wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d",
- enable ? "start" : "stop", channel);
+ enable ? "start" : "stop", cmd->channel);
out:
kfree(cmd);
@@ -636,7 +655,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
channels = wl->hw->wiphy->bands[ieee_band]->channels;
n_ch = wl->hw->wiphy->bands[ieee_band]->n_channels;
- if (wl->scanning)
+ if (test_bit(WL1271_FLAG_SCANNING, &wl->flags))
return -EINVAL;
params = kzalloc(sizeof(*params), GFP_KERNEL);
@@ -711,7 +730,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params));
- wl->scanning = true;
+ set_bit(WL1271_FLAG_SCANNING, &wl->flags);
if (wl1271_11a_enabled()) {
wl->scan.state = band;
if (band == WL1271_SCAN_BAND_DUAL) {
@@ -729,7 +748,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params), 0);
if (ret < 0) {
wl1271_error("SCAN failed");
- wl->scanning = false;
+ clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
goto out;
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index b4fa4acb9229..09fe91297acf 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -37,7 +37,7 @@ int wl1271_cmd_join(struct wl1271 *wl);
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
-int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable);
+int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
size_t len);
@@ -437,6 +437,21 @@ struct wl1271_general_parms_cmd {
u8 tx_bip_fem_autodetect;
u8 tx_bip_fem_manufacturer;
u8 settings;
+
+ u8 sr_state;
+
+ s8 srf1[CONF_MAX_SMART_REFLEX_PARAMS];
+ s8 srf2[CONF_MAX_SMART_REFLEX_PARAMS];
+ s8 srf3[CONF_MAX_SMART_REFLEX_PARAMS];
+
+ s8 sr_debug_table[CONF_MAX_SMART_REFLEX_PARAMS];
+
+ u8 sr_sen_n_p;
+ u8 sr_sen_n_p_gain;
+ u8 sr_sen_nrn;
+ u8 sr_sen_prn;
+
+ u8 padding[3];
} __attribute__ ((packed));
struct wl1271_radio_parms_cmd {
@@ -458,11 +473,12 @@ struct wl1271_radio_parms_cmd {
/* Dynamic radio parameters */
/* 2.4GHz */
__le16 tx_ref_pd_voltage;
- s8 tx_ref_power;
+ u8 tx_ref_power;
s8 tx_offset_db;
s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
+ s8 tx_rate_limits_extreme[CONF_NUMBER_OF_RATE_GROUPS];
s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
@@ -471,15 +487,19 @@ struct wl1271_radio_parms_cmd {
u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
u8 rx_fem_insertion_loss;
- u8 padding2;
+ u8 degraded_low_to_normal_threshold;
+ u8 degraded_normal_to_high_threshold;
+
+ u8 padding1; /* our own padding, not in ref driver */
/* 5GHz */
__le16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
- s8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
+ u8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
+ s8 tx_rate_limits_extreme_5[CONF_NUMBER_OF_RATE_GROUPS];
s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
@@ -488,7 +508,10 @@ struct wl1271_radio_parms_cmd {
s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
- u8 padding3[2];
+ u8 degraded_low_to_normal_threshold_5;
+ u8 degraded_normal_to_high_threshold_5;
+
+ u8 padding2[2];
} __attribute__ ((packed));
struct wl1271_cmd_cal_channel_tune {
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h
index 565373ede265..1993d63c214e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_conf.h
+++ b/drivers/net/wireless/wl12xx/wl1271_conf.h
@@ -258,7 +258,8 @@ struct conf_rx_settings {
#define CONF_TX_MAX_RATE_CLASSES 8
#define CONF_TX_RATE_MASK_UNSPECIFIED 0
-#define CONF_TX_RATE_MASK_ALL 0x1eff
+#define CONF_TX_RATE_MASK_BASIC (CONF_HW_BIT_RATE_1MBPS | \
+ CONF_HW_BIT_RATE_2MBPS)
#define CONF_TX_RATE_RETRY_LIMIT 10
struct conf_tx_rate_class {
@@ -722,31 +723,6 @@ struct conf_conn_settings {
u8 psm_entry_retries;
};
-#define CONF_SR_ERR_TBL_MAX_VALUES 14
-
-struct conf_mart_reflex_err_table {
- /*
- * Length of the error table values table.
- *
- * Range: 0 - CONF_SR_ERR_TBL_MAX_VALUES
- */
- u8 len;
-
- /*
- * Smart Reflex error table upper limit.
- *
- * Range: s8
- */
- s8 upper_limit;
-
- /*
- * Smart Reflex error table values.
- *
- * Range: s8
- */
- s8 values[CONF_SR_ERR_TBL_MAX_VALUES];
-};
-
enum {
CONF_REF_CLK_19_2_E,
CONF_REF_CLK_26_E,
@@ -759,6 +735,9 @@ enum single_dual_band_enum {
CONF_DUAL_BAND
};
+
+#define CONF_MAX_SMART_REFLEX_PARAMS 16
+
struct conf_general_parms {
/*
* RF Reference Clock type / speed
@@ -815,6 +794,20 @@ struct conf_general_parms {
* Range: Unknown
*/
u8 settings;
+
+ /* Smart reflex settings */
+ u8 sr_state;
+
+ s8 srf1[CONF_MAX_SMART_REFLEX_PARAMS];
+ s8 srf2[CONF_MAX_SMART_REFLEX_PARAMS];
+ s8 srf3[CONF_MAX_SMART_REFLEX_PARAMS];
+
+ s8 sr_debug_table[CONF_MAX_SMART_REFLEX_PARAMS];
+
+ u8 sr_sen_n_p;
+ u8 sr_sen_n_p_gain;
+ u8 sr_sen_nrn;
+ u8 sr_sen_prn;
};
#define CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE 15
@@ -847,12 +840,13 @@ struct conf_radio_parms {
*
* Range: unknown
*/
- s16 tx_ref_pd_voltage;
- s8 tx_ref_power;
+ u16 tx_ref_pd_voltage;
+ u8 tx_ref_power;
s8 tx_offset_db;
s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
+ s8 tx_rate_limits_extreme[CONF_NUMBER_OF_RATE_GROUPS];
s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
@@ -861,17 +855,22 @@ struct conf_radio_parms {
u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
u8 rx_fem_insertion_loss;
+ u8 degraded_low_to_normal_threshold;
+ u8 degraded_normal_to_high_threshold;
+
+
/*
* Dynamic radio parameters for 5GHz
*
* Range: unknown
*/
- s16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
- s8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
+ u16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
+ u8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
+ s8 tx_rate_limits_extreme_5[CONF_NUMBER_OF_RATE_GROUPS];
s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
@@ -879,33 +878,46 @@ struct conf_radio_parms {
/* FIXME: this is inconsistent with the types for 2.4GHz */
s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
-};
-#define CONF_SR_ERR_TBL_COUNT 3
+ u8 degraded_low_to_normal_threshold_5;
+ u8 degraded_normal_to_high_threshold_5;
+};
struct conf_init_settings {
/*
- * Configure Smart Reflex error table values.
+ * Configure general parameters.
*/
- struct conf_mart_reflex_err_table sr_err_tbl[CONF_SR_ERR_TBL_COUNT];
+ struct conf_general_parms genparam;
/*
- * Smart Reflex enable flag.
- *
- * Range: 1 - Smart Reflex enabled, 0 - Smart Reflex disabled
+ * Configure radio parameters.
*/
- u8 sr_enable;
+ struct conf_radio_parms radioparam;
+};
+
+struct conf_itrim_settings {
+ /* enable dco itrim */
+ u8 enable;
+
+ /* moderation timeout in microsecs from the last TX */
+ u32 timeout;
+};
+
+struct conf_pm_config_settings {
/*
- * Configure general parameters.
+ * Host clock settling time
+ *
+ * Range: 0 - 30000 us
*/
- struct conf_general_parms genparam;
+ u32 host_clk_settling_time;
/*
- * Configure radio parameters.
+ * Host fast wakeup support
+ *
+ * Range: true, false
*/
- struct conf_radio_parms radioparam;
-
+ bool host_fast_wakeup_support;
};
struct conf_drv_settings {
@@ -914,6 +926,8 @@ struct conf_drv_settings {
struct conf_tx_settings tx;
struct conf_conn_settings conn;
struct conf_init_settings init;
+ struct conf_itrim_settings itrim;
+ struct conf_pm_config_settings pm_config;
};
#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.c b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
index c1805e5f8964..8d7588ca68fd 100644
--- a/drivers/net/wireless/wl12xx/wl1271_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
@@ -237,6 +237,64 @@ static const struct file_operations tx_queue_len_ops = {
.open = wl1271_open_file_generic,
};
+static ssize_t gpio_power_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ bool state = test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+
+ int res;
+ char buf[10];
+
+ res = scnprintf(buf, sizeof(buf), "%d\n", state);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+}
+
+static ssize_t gpio_power_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ char buf[10];
+ size_t len;
+ unsigned long value;
+ int ret;
+
+ mutex_lock(&wl->mutex);
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ buf[len] = '\0';
+
+ ret = strict_strtoul(buf, 0, &value);
+ if (ret < 0) {
+ wl1271_warning("illegal value in gpio_power");
+ goto out;
+ }
+
+ if (value) {
+ wl->set_power(true);
+ set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+ } else {
+ wl->set_power(false);
+ clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+ }
+
+out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static const struct file_operations gpio_power_ops = {
+ .read = gpio_power_read,
+ .write = gpio_power_write,
+ .open = wl1271_open_file_generic
+};
+
static void wl1271_debugfs_delete_files(struct wl1271 *wl)
{
DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow);
@@ -333,6 +391,8 @@ static void wl1271_debugfs_delete_files(struct wl1271 *wl)
DEBUGFS_DEL(tx_queue_len);
DEBUGFS_DEL(retry_count);
DEBUGFS_DEL(excessive_retries);
+
+ DEBUGFS_DEL(gpio_power);
}
static int wl1271_debugfs_add_files(struct wl1271 *wl)
@@ -434,6 +494,8 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl)
DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
+ DEBUGFS_ADD(gpio_power, wl->debugfs.rootdir);
+
out:
if (ret < 0)
wl1271_debugfs_delete_files(wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index d13fdd99c85c..0a145afc9905 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -35,7 +35,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
wl1271_debug(DEBUG_EVENT, "status: 0x%x",
mbox->scheduled_scan_status);
- if (wl->scanning) {
+ if (test_bit(WL1271_FLAG_SCANNING, &wl->flags)) {
if (wl->scan.state == WL1271_SCAN_BAND_DUAL) {
wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
NULL, size);
@@ -43,7 +43,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
* to the wl1271_cmd_scan function that we are not
* scanning as it checks that.
*/
- wl->scanning = false;
+ clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len,
wl->scan.active,
wl->scan.high_prio,
@@ -62,7 +62,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
mutex_unlock(&wl->mutex);
ieee80211_scan_completed(wl->hw, false);
mutex_lock(&wl->mutex);
- wl->scanning = false;
+ clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
}
}
return 0;
@@ -78,7 +78,7 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
switch (mbox->ps_status) {
case EVENT_ENTER_POWER_SAVE_FAIL:
- if (!wl->psm) {
+ if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
wl->psm_entry_retry = 0;
break;
}
@@ -89,7 +89,6 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
} else {
wl1271_error("PSM entry failed, giving up.\n");
wl->psm_entry_retry = 0;
- *beacon_loss = true;
}
break;
case EVENT_ENTER_POWER_SAVE_SUCCESS:
@@ -136,7 +135,8 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
* filtering) is enabled. Without PSM, the stack will receive all
* beacons and can detect beacon loss by itself.
*/
- if (vector & BSS_LOSE_EVENT_ID && wl->psm) {
+ if (vector & BSS_LOSE_EVENT_ID &&
+ test_bit(WL1271_FLAG_PSM, &wl->flags)) {
wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
/* indicate to the stack, that beacons have been lost */
@@ -150,7 +150,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
return ret;
}
- if (beacon_loss) {
+ if (wl->vif && beacon_loss) {
/* Obviously, it's dangerous to release the mutex while
we are holding many of the variables in the wl struct.
That's why it's done last in the function, and care must
@@ -184,7 +184,7 @@ void wl1271_event_mbox_config(struct wl1271 *wl)
wl->mbox_ptr[0], wl->mbox_ptr[1]);
}
-int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack)
+int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
{
struct event_mailbox mbox;
int ret;
@@ -204,9 +204,7 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack)
return ret;
/* then we let the firmware know it can go on...*/
- if (do_ack)
- wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG,
- INTR_TRIG_EVENT_ACK);
+ wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK);
return 0;
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 4e3f55ebb1a8..278f9206aa56 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -112,6 +112,6 @@ struct event_mailbox {
int wl1271_event_unmask(struct wl1271 *wl);
void wl1271_event_mbox_config(struct wl1271 *wl);
-int wl1271_event_handle(struct wl1271 *wl, u8 mbox, bool do_ack);
+int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/wl1271_init.c
index 11249b436cf1..c9848eecb767 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/wl1271_init.c
@@ -229,6 +229,10 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
+ ret = wl1271_acx_dco_itrim_params(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
/* Initialize connection monitoring thresholds */
ret = wl1271_acx_conn_monit_params(wl);
if (ret < 0)
@@ -280,12 +284,12 @@ int wl1271_hw_init(struct wl1271 *wl)
goto out_free_memmap;
/* Configure TX rate classes */
- ret = wl1271_acx_rate_policies(wl, CONF_TX_RATE_MASK_ALL);
+ ret = wl1271_acx_rate_policies(wl);
if (ret < 0)
goto out_free_memmap;
/* Enable data path */
- ret = wl1271_cmd_data_path(wl, wl->channel, 1);
+ ret = wl1271_cmd_data_path(wl, 1);
if (ret < 0)
goto out_free_memmap;
@@ -299,8 +303,8 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* Configure smart reflex */
- ret = wl1271_acx_smart_reflex(wl);
+ /* configure PM */
+ ret = wl1271_acx_pm_config(wl);
if (ret < 0)
goto out_free_memmap;
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index b62c00ff42fe..e4867b895c43 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -47,6 +47,8 @@
#include "wl1271_cmd.h"
#include "wl1271_boot.h"
+#define WL1271_BOOT_RETRIES 3
+
static struct conf_drv_settings default_conf = {
.sg = {
.per_threshold = 7500,
@@ -67,16 +69,17 @@ static struct conf_drv_settings default_conf = {
.ps_poll_timeout = 15,
.upsd_timeout = 15,
.rts_threshold = 2347,
- .rx_cca_threshold = 0xFFEF,
- .irq_blk_threshold = 0,
- .irq_pkt_threshold = USHORT_MAX,
- .irq_timeout = 5,
+ .rx_cca_threshold = 0,
+ .irq_blk_threshold = 0xFFFF,
+ .irq_pkt_threshold = 0,
+ .irq_timeout = 600,
.queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
},
.tx = {
.tx_energy_detection = 0,
.rc_conf = {
- .enabled_rates = CONF_TX_RATE_MASK_UNSPECIFIED,
+ .enabled_rates = CONF_HW_BIT_RATE_1MBPS |
+ CONF_HW_BIT_RATE_2MBPS,
.short_retry_limit = 10,
.long_retry_limit = 10,
.aflags = 0
@@ -172,8 +175,8 @@ static struct conf_drv_settings default_conf = {
}
},
.frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
- .tx_compl_timeout = 5,
- .tx_compl_threshold = 5
+ .tx_compl_timeout = 700,
+ .tx_compl_threshold = 4
},
.conn = {
.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
@@ -186,12 +189,12 @@ static struct conf_drv_settings default_conf = {
.rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
}
},
- .synch_fail_thold = 5,
+ .synch_fail_thold = 10,
.bss_lose_timeout = 100,
.beacon_rx_timeout = 10000,
.broadcast_timeout = 20000,
.rx_broadcast_in_ps = 1,
- .ps_poll_threshold = 4,
+ .ps_poll_threshold = 20,
.sig_trigger_count = 2,
.sig_trigger = {
[0] = {
@@ -226,46 +229,35 @@ static struct conf_drv_settings default_conf = {
.psm_entry_retries = 3
},
.init = {
- .sr_err_tbl = {
- [0] = {
- .len = 7,
- .upper_limit = 0x03,
- .values = {
- 0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
- 0x00 }
- },
- [1] = {
- .len = 7,
- .upper_limit = 0x03,
- .values = {
- 0x18, 0x10, 0x05, 0xf6, 0xf0, 0xe8,
- 0x00 }
- },
- [2] = {
- .len = 7,
- .upper_limit = 0x03,
- .values = {
- 0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
- 0x00 }
- }
- },
- .sr_enable = 1,
.genparam = {
.ref_clk = CONF_REF_CLK_38_4_E,
.settling_time = 5,
.clk_valid_on_wakeup = 0,
.dc2dcmode = 0,
.single_dual_band = CONF_SINGLE_BAND,
- .tx_bip_fem_autodetect = 0,
+ .tx_bip_fem_autodetect = 1,
.tx_bip_fem_manufacturer = 1,
.settings = 1,
+ .sr_state = 1,
+ .srf1 = { 0x07, 0x03, 0x18, 0x10, 0x05, 0xfb, 0xf0,
+ 0xe8, 0, 0, 0, 0, 0, 0, 0, 0 },
+ .srf2 = { 0x07, 0x03, 0x18, 0x10, 0x05, 0xfb, 0xf0,
+ 0xe8, 0, 0, 0, 0, 0, 0, 0, 0 },
+ .srf3 = { 0x07, 0x03, 0x18, 0x10, 0x05, 0xfb, 0xf0,
+ 0xe8, 0, 0, 0, 0, 0, 0, 0, 0 },
+ .sr_debug_table = { 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 },
+ .sr_sen_n_p = 0,
+ .sr_sen_n_p_gain = 0,
+ .sr_sen_nrn = 0,
+ .sr_sen_prn = 0,
},
.radioparam = {
- .rx_trace_loss = 10,
- .tx_trace_loss = 10,
+ .rx_trace_loss = 0x24,
+ .tx_trace_loss = 0x0,
.rx_rssi_and_proc_compens = {
0xec, 0xf6, 0x00, 0x0c, 0x18, 0xf8,
- 0xfc, 0x00, 0x08, 0x10, 0xf0, 0xf8,
+ 0xfc, 0x00, 0x80, 0x10, 0xf0, 0xf8,
0x00, 0x0a, 0x14 },
.rx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 },
.tx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 },
@@ -273,13 +265,15 @@ static struct conf_drv_settings default_conf = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00 },
- .tx_ref_pd_voltage = 0x24e,
- .tx_ref_power = 0x78,
+ .tx_ref_pd_voltage = 0x1a9,
+ .tx_ref_power = 0x80,
.tx_offset_db = 0x0,
.tx_rate_limits_normal = {
- 0x1e, 0x1f, 0x22, 0x24, 0x28, 0x29 },
+ 0x1d, 0x1f, 0x24, 0x28, 0x28, 0x29 },
.tx_rate_limits_degraded = {
- 0x1b, 0x1c, 0x1e, 0x20, 0x24, 0x25 },
+ 0x19, 0x1f, 0x22, 0x23, 0x27, 0x28 },
+ .tx_rate_limits_extreme = {
+ 0x19, 0x1c, 0x1e, 0x20, 0x24, 0x25 },
.tx_channel_limits_11b = {
0x22, 0x50, 0x50, 0x50, 0x50, 0x50,
0x50, 0x50, 0x50, 0x50, 0x22, 0x50,
@@ -289,10 +283,12 @@ static struct conf_drv_settings default_conf = {
0x50, 0x50, 0x50, 0x50, 0x20, 0x50,
0x20, 0x50 },
.tx_pdv_rate_offsets = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ 0x07, 0x08, 0x04, 0x02, 0x02, 0x00 },
.tx_ibias = {
- 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x27 },
- .rx_fem_insertion_loss = 0x14,
+ 0x11, 0x11, 0x15, 0x11, 0x15, 0x0f },
+ .rx_fem_insertion_loss = 0x0e,
+ .degraded_low_to_normal_threshold = 0x1e,
+ .degraded_normal_to_high_threshold = 0x2d,
.tx_ref_pd_voltage_5 = {
0x0190, 0x01a4, 0x01c3, 0x01d8,
0x020a, 0x021c },
@@ -304,6 +300,8 @@ static struct conf_drv_settings default_conf = {
0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
.tx_rate_limits_degraded_5 = {
0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
+ .tx_rate_limits_extreme_5 = {
+ 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
.tx_channel_limits_ofdm_5 = {
0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
@@ -315,8 +313,18 @@ static struct conf_drv_settings default_conf = {
.tx_ibias_5 = {
0x10, 0x10, 0x10, 0x10, 0x10, 0x10 },
.rx_fem_insertion_loss_5 = {
- 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 }
+ 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 },
+ .degraded_low_to_normal_threshold_5 = 0x00,
+ .degraded_normal_to_high_threshold_5 = 0x00
}
+ },
+ .itrim = {
+ .enable = false,
+ .timeout = 50000,
+ },
+ .pm_config = {
+ .host_clk_settling_time = 5000,
+ .host_fast_wakeup_support = false
}
};
@@ -359,7 +367,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_cmd_data_path(wl, wl->channel, 1);
+ ret = wl1271_cmd_data_path(wl, 1);
if (ret < 0)
return ret;
@@ -374,11 +382,13 @@ static void wl1271_disable_interrupts(struct wl1271 *wl)
static void wl1271_power_off(struct wl1271 *wl)
{
wl->set_power(false);
+ clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
}
static void wl1271_power_on(struct wl1271 *wl)
{
wl->set_power(true);
+ set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
}
static void wl1271_fw_status(struct wl1271 *wl,
@@ -447,14 +457,13 @@ static void wl1271_irq_work(struct work_struct *work)
intr &= WL1271_INTR_MASK;
if (intr & WL1271_ACX_INTR_EVENT_A) {
- bool do_ack = (intr & WL1271_ACX_INTR_EVENT_B) ? false : true;
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
- wl1271_event_handle(wl, 0, do_ack);
+ wl1271_event_handle(wl, 0);
}
if (intr & WL1271_ACX_INTR_EVENT_B) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
- wl1271_event_handle(wl, 1, true);
+ wl1271_event_handle(wl, 1);
}
if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
@@ -614,6 +623,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
struct wl1271_partition_set partition;
int ret = 0;
+ msleep(WL1271_PRE_POWER_ON_SLEEP);
wl1271_power_on(wl);
msleep(WL1271_POWER_ON_SLEEP);
wl1271_spi_reset(wl);
@@ -643,7 +653,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
- goto out_power_off;
+ goto out;
break;
case CHIP_ID_1271_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
@@ -651,38 +661,34 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
- goto out_power_off;
+ goto out;
break;
default:
- wl1271_error("unsupported chip id: 0x%x", wl->chip.id);
+ wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
ret = -ENODEV;
- goto out_power_off;
+ goto out;
}
if (wl->fw == NULL) {
ret = wl1271_fetch_firmware(wl);
if (ret < 0)
- goto out_power_off;
+ goto out;
}
/* No NVS from netlink, try to get it from the filesystem */
if (wl->nvs == NULL) {
ret = wl1271_fetch_nvs(wl);
if (ret < 0)
- goto out_power_off;
+ goto out;
}
- goto out;
-
-out_power_off:
- wl1271_power_off(wl);
-
out:
return ret;
}
int wl1271_plt_start(struct wl1271 *wl)
{
+ int retries = WL1271_BOOT_RETRIES;
int ret;
mutex_lock(&wl->mutex);
@@ -696,35 +702,48 @@ int wl1271_plt_start(struct wl1271 *wl)
goto out;
}
- wl->state = WL1271_STATE_PLT;
-
- ret = wl1271_chip_wakeup(wl);
- if (ret < 0)
- goto out;
-
- ret = wl1271_boot(wl);
- if (ret < 0)
- goto out_power_off;
-
- wl1271_notice("firmware booted in PLT mode (%s)", wl->chip.fw_ver);
+ while (retries) {
+ retries--;
+ ret = wl1271_chip_wakeup(wl);
+ if (ret < 0)
+ goto power_off;
- ret = wl1271_plt_init(wl);
- if (ret < 0)
- goto out_irq_disable;
+ ret = wl1271_boot(wl);
+ if (ret < 0)
+ goto power_off;
- /* Make sure power saving is disabled */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
- if (ret < 0)
- goto out_irq_disable;
+ ret = wl1271_plt_init(wl);
+ if (ret < 0)
+ goto irq_disable;
- goto out;
+ /* Make sure power saving is disabled */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+ if (ret < 0)
+ goto irq_disable;
-out_irq_disable:
- wl1271_disable_interrupts(wl);
+ wl->state = WL1271_STATE_PLT;
+ wl1271_notice("firmware booted in PLT mode (%s)",
+ wl->chip.fw_ver);
+ goto out;
-out_power_off:
- wl1271_power_off(wl);
+irq_disable:
+ wl1271_disable_interrupts(wl);
+ mutex_unlock(&wl->mutex);
+ /* Unlocking the mutex in the middle of handling is
+ inherently unsafe. In this case we deem it safe to do,
+ because we need to let any possibly pending IRQ out of
+ the system (and while we are WL1271_STATE_OFF the IRQ
+ work function will not do anything.) Also, any other
+ possible concurrent operations will fail due to the
+ current state, hence the wl1271 struct should be safe. */
+ cancel_work_sync(&wl->irq_work);
+ mutex_lock(&wl->mutex);
+power_off:
+ wl1271_power_off(wl);
+ }
+ wl1271_error("firmware boot in PLT mode failed despite %d retries",
+ WL1271_BOOT_RETRIES);
out:
mutex_unlock(&wl->mutex);
@@ -762,7 +781,20 @@ out:
static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct wl1271 *wl = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = txinfo->control.sta;
+ unsigned long flags;
+ /* peek into the rates configured in the STA entry */
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ if (sta && sta->supp_rates[conf->channel->band] != wl->sta_rate_set) {
+ wl->sta_rate_set = sta->supp_rates[conf->channel->band];
+ set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
+ }
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ /* queue the packet */
skb_queue_tail(&wl->tx_queue, skb);
/*
@@ -784,7 +816,7 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* protected. Maybe fix this by removing the stupid
* variable altogether and checking the real queue state?
*/
- wl->tx_queue_stopped = true;
+ set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
}
return NETDEV_TX_OK;
@@ -880,6 +912,7 @@ static struct notifier_block wl1271_dev_notifier = {
static int wl1271_op_start(struct ieee80211_hw *hw)
{
struct wl1271 *wl = hw->priv;
+ int retries = WL1271_BOOT_RETRIES;
int ret = 0;
wl1271_debug(DEBUG_MAC80211, "mac80211 start");
@@ -893,30 +926,42 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
goto out;
}
- ret = wl1271_chip_wakeup(wl);
- if (ret < 0)
- goto out;
-
- ret = wl1271_boot(wl);
- if (ret < 0)
- goto out_power_off;
-
- ret = wl1271_hw_init(wl);
- if (ret < 0)
- goto out_irq_disable;
-
- wl->state = WL1271_STATE_ON;
+ while (retries) {
+ retries--;
+ ret = wl1271_chip_wakeup(wl);
+ if (ret < 0)
+ goto power_off;
- wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
+ ret = wl1271_boot(wl);
+ if (ret < 0)
+ goto power_off;
- goto out;
+ ret = wl1271_hw_init(wl);
+ if (ret < 0)
+ goto irq_disable;
-out_irq_disable:
- wl1271_disable_interrupts(wl);
+ wl->state = WL1271_STATE_ON;
+ wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
+ goto out;
-out_power_off:
- wl1271_power_off(wl);
+irq_disable:
+ wl1271_disable_interrupts(wl);
+ mutex_unlock(&wl->mutex);
+ /* Unlocking the mutex in the middle of handling is
+ inherently unsafe. In this case we deem it safe to do,
+ because we need to let any possibly pending IRQ out of
+ the system (and while we are WL1271_STATE_OFF the IRQ
+ work function will not do anything.) Also, any other
+ possible concurrent operations will fail due to the
+ current state, hence the wl1271 struct should be safe. */
+ cancel_work_sync(&wl->irq_work);
+ mutex_lock(&wl->mutex);
+power_off:
+ wl1271_power_off(wl);
+ }
+ wl1271_error("firmware boot failed despite %d retries",
+ WL1271_BOOT_RETRIES);
out:
mutex_unlock(&wl->mutex);
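
Both boot paths (wl1271_plt_start() earlier and wl1271_op_start() above) use the same retry idiom: cleanup labels placed inside the while loop so each failed attempt powers the chip down before the next try. A condensed sketch of that control flow, with placeholder step functions standing in for the wakeup/boot/init calls:

    static int example_step(void)     { return 0; }
    static void example_cleanup(void) { }

    static int example_boot_with_retries(void)
    {
            int retries = WL1271_BOOT_RETRIES;
            int ret = -EIO;

            while (retries--) {
                    ret = example_step();           /* chip wakeup + boot */
                    if (ret < 0)
                            goto power_off;

                    ret = example_step();           /* hw init */
                    if (ret < 0)
                            goto irq_disable;

                    return 0;                       /* success leaves the loop */

    irq_disable:
                    example_cleanup();              /* disable IRQs, flush work */
    power_off:
                    example_cleanup();              /* power the chip down */
            }
            return ret;                             /* all retries exhausted */
    }
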
@@ -944,11 +989,10 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
WARN_ON(wl->state != WL1271_STATE_ON);
- if (wl->scanning) {
+ if (test_and_clear_bit(WL1271_FLAG_SCANNING, &wl->flags)) {
mutex_unlock(&wl->mutex);
ieee80211_scan_completed(wl->hw, true);
mutex_lock(&wl->mutex);
- wl->scanning = false;
}
wl->state = WL1271_STATE_OFF;
@@ -973,10 +1017,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
wl->band = IEEE80211_BAND_2GHZ;
wl->rx_counter = 0;
- wl->elp = false;
- wl->psm = 0;
wl->psm_entry_retry = 0;
- wl->tx_queue_stopped = false;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->tx_blocks_available = 0;
wl->tx_results_count = 0;
@@ -986,7 +1027,9 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
wl->tx_security_seq_32 = 0;
wl->time_offset = 0;
wl->session_counter = 0;
- wl->joined = false;
+ wl->rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->sta_rate_set = 0;
+ wl->flags = 0;
for (i = 0; i < NUM_TX_QUEUES; i++)
wl->tx_blocks_freed[i] = 0;
@@ -996,13 +1039,13 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
}
static int wl1271_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
int ret = 0;
wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
- conf->type, conf->mac_addr);
+ vif->type, vif->addr);
mutex_lock(&wl->mutex);
if (wl->vif) {
@@ -1010,9 +1053,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
- wl->vif = conf->vif;
+ wl->vif = vif;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
wl->bss_type = BSS_TYPE_STA_BSS;
break;
@@ -1032,7 +1075,7 @@ out:
}
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
@@ -1109,6 +1152,51 @@ out:
}
#endif
+static int wl1271_join_channel(struct wl1271 *wl, int channel)
+{
+ int ret = 0;
+ /* we need to use a dummy BSSID for now */
+ static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde,
+ 0xad, 0xbe, 0xef };
+
+ /* the dummy join is not required for ad-hoc */
+ if (wl->bss_type == BSS_TYPE_IBSS)
+ goto out;
+
+ /* disable mac filter, so we hear everything */
+ wl->rx_config &= ~CFG_BSSID_FILTER_EN;
+
+ wl->channel = channel;
+ memcpy(wl->bssid, dummy_bssid, ETH_ALEN);
+
+ ret = wl1271_cmd_join(wl);
+ if (ret < 0)
+ goto out;
+
+ set_bit(WL1271_FLAG_JOINED, &wl->flags);
+
+out:
+ return ret;
+}
+
+static int wl1271_unjoin_channel(struct wl1271 *wl)
+{
+ int ret;
+
+ /* to stop listening to a channel, we disconnect */
+ ret = wl1271_cmd_disconnect(wl);
+ if (ret < 0)
+ goto out;
+
+ clear_bit(WL1271_FLAG_JOINED, &wl->flags);
+ wl->channel = 0;
+ memset(wl->bssid, 0, ETH_ALEN);
+ wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
+
+out:
+ return ret;
+}
+
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct wl1271 *wl = hw->priv;
@@ -1117,10 +1205,11 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
- wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d",
+ wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s",
channel,
conf->flags & IEEE80211_CONF_PS ? "on" : "off",
- conf->power_level);
+ conf->power_level,
+ conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use");
mutex_lock(&wl->mutex);
@@ -1130,34 +1219,44 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
if (ret < 0)
goto out;
- if (channel != wl->channel) {
- /*
- * We assume that the stack will configure the right channel
- * before associating, so we don't need to send a join
- * command here. We will join the right channel when the
- * BSSID changes
- */
- wl->channel = channel;
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ if (conf->flags & IEEE80211_CONF_IDLE &&
+ test_bit(WL1271_FLAG_JOINED, &wl->flags))
+ wl1271_unjoin_channel(wl);
+ else if (!(conf->flags & IEEE80211_CONF_IDLE))
+ wl1271_join_channel(wl, channel);
+
+ if (conf->flags & IEEE80211_CONF_IDLE) {
+ wl->rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->sta_rate_set = 0;
+ wl1271_acx_rate_policies(wl);
+ }
}
- if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) {
- wl1271_info("psm enabled");
+ /* if the channel changes while joined, join again */
+ if (channel != wl->channel && test_bit(WL1271_FLAG_JOINED, &wl->flags))
+ wl1271_join_channel(wl, channel);
- wl->psm_requested = true;
+ if (conf->flags & IEEE80211_CONF_PS &&
+ !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
+ set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
/*
* We enter PSM only if we're already associated.
* If we're not, we'll enter it when joining an SSID,
* through the bss_info_changed() hook.
*/
- ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
+ if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+ wl1271_info("psm enabled");
+ ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
+ }
} else if (!(conf->flags & IEEE80211_CONF_PS) &&
- wl->psm_requested) {
+ test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
wl1271_info("psm disabled");
- wl->psm_requested = false;
+ clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
- if (wl->psm)
+ if (test_bit(WL1271_FLAG_PSM, &wl->flags))
ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE);
}
@@ -1440,22 +1539,6 @@ out:
return ret;
}
-static u32 wl1271_enabled_rates_get(struct wl1271 *wl, u64 basic_rate_set)
-{
- struct ieee80211_supported_band *band;
- u32 enabled_rates = 0;
- int bit;
-
- band = wl->hw->wiphy->bands[wl->band];
- for (bit = 0; bit < band->n_bitrates; bit++) {
- if (basic_rate_set & 0x1)
- enabled_rates |= band->bitrates[bit].hw_value;
- basic_rate_set >>= 1;
- }
-
- return enabled_rates;
-}
-
static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -1473,9 +1556,68 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
+ if ((changed & BSS_CHANGED_BSSID) &&
+ /*
+ * Now we know the correct bssid, so we send a new join command
+ * and enable the BSSID filter
+ */
+ memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
+ wl->rx_config |= CFG_BSSID_FILTER_EN;
+ memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
+ ret = wl1271_cmd_build_null_data(wl);
+ if (ret < 0) {
+ wl1271_warning("cmd buld null data failed %d",
+ ret);
+ goto out_sleep;
+ }
+ ret = wl1271_cmd_join(wl);
+ if (ret < 0) {
+ wl1271_warning("cmd join failed %d", ret);
+ goto out_sleep;
+ }
+ set_bit(WL1271_FLAG_JOINED, &wl->flags);
+ }
+
+ if (wl->bss_type == BSS_TYPE_IBSS) {
+ /* FIXME: This implements rudimentary ad-hoc support -
+ proper templates are on the wish list and notification
+ on when they change. This patch will update the templates
+ on every call to this function. Also, the firmware will not
+ answer to probe-requests as it does not have the proper
+ SSID set in the JOIN command. The probe-response template
+ is set nevertheless, as the FW will ASSERT without it */
+ struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
+
+ if (beacon) {
+ struct ieee80211_hdr *hdr;
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
+ beacon->data,
+ beacon->len);
+
+ if (ret < 0) {
+ dev_kfree_skb(beacon);
+ goto out_sleep;
+ }
+
+ hdr = (struct ieee80211_hdr *) beacon->data;
+ hdr->frame_control = cpu_to_le16(
+ IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_PROBE_RESP);
+
+ ret = wl1271_cmd_template_set(wl,
+ CMD_TEMPL_PROBE_RESPONSE,
+ beacon->data,
+ beacon->len);
+ dev_kfree_skb(beacon);
+ if (ret < 0)
+ goto out_sleep;
+ }
+ }
+
if (changed & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
wl->aid = bss_conf->aid;
+ set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
/*
* with wl1271, we don't need to update the
@@ -1492,7 +1634,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
goto out_sleep;
/* If we want to go in PSM but we're not there yet */
- if (wl->psm_requested && !wl->psm) {
+ if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
+ !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
mode = STATION_POWER_SAVE_MODE;
ret = wl1271_ps_set_mode(wl, mode);
if (ret < 0)
@@ -1500,7 +1643,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
}
} else {
/* use defaults when not associated */
- wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET;
+ clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
wl->aid = 0;
}
@@ -1535,17 +1678,6 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
}
}
- if (changed & BSS_CHANGED_BASIC_RATES) {
- wl->basic_rate_set = wl1271_enabled_rates_get(
- wl, bss_conf->basic_rates);
-
- ret = wl1271_acx_rate_policies(wl, wl->basic_rate_set);
- if (ret < 0) {
- wl1271_warning("Set rate policies failed %d", ret);
- goto out_sleep;
- }
- }
-
out_sleep:
wl1271_ps_elp_sleep(wl);
@@ -1599,19 +1731,19 @@ static struct ieee80211_rate wl1271_rates[] = {
/* can't be const, mac80211 writes to this */
static struct ieee80211_channel wl1271_channels[] = {
- { .hw_value = 1, .center_freq = 2412},
- { .hw_value = 2, .center_freq = 2417},
- { .hw_value = 3, .center_freq = 2422},
- { .hw_value = 4, .center_freq = 2427},
- { .hw_value = 5, .center_freq = 2432},
- { .hw_value = 6, .center_freq = 2437},
- { .hw_value = 7, .center_freq = 2442},
- { .hw_value = 8, .center_freq = 2447},
- { .hw_value = 9, .center_freq = 2452},
- { .hw_value = 10, .center_freq = 2457},
- { .hw_value = 11, .center_freq = 2462},
- { .hw_value = 12, .center_freq = 2467},
- { .hw_value = 13, .center_freq = 2472},
+ { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
+ { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
+ { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
+ { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
+ { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
+ { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
+ { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
+ { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
+ { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
+ { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
+ { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
+ { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
+ { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
};
/* can't be const, mac80211 writes to this */
@@ -1757,7 +1889,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
IEEE80211_HW_BEACON_FILTER |
IEEE80211_HW_SUPPORTS_PS;
- wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
wl->hw->wiphy->max_scan_ssids = 1;
wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
@@ -1818,21 +1951,18 @@ static int __devinit wl1271_probe(struct spi_device *spi)
INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
wl->channel = WL1271_DEFAULT_CHANNEL;
- wl->scanning = false;
wl->default_key = 0;
wl->rx_counter = 0;
wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
- wl->elp = false;
- wl->psm = 0;
- wl->psm_requested = false;
wl->psm_entry_retry = 0;
- wl->tx_queue_stopped = false;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
- wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET;
+ wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->sta_rate_set = 0;
wl->band = IEEE80211_BAND_2GHZ;
wl->vif = NULL;
- wl->joined = false;
+ wl->flags = 0;
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
wl->tx_frames[i] = NULL;
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/wl1271_ps.c
index 507cd91d7eed..e407790f6771 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.c
@@ -39,12 +39,13 @@ void wl1271_elp_work(struct work_struct *work)
mutex_lock(&wl->mutex);
- if (wl->elp || !wl->psm)
+ if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
+ !test_bit(WL1271_FLAG_PSM, &wl->flags))
goto out;
wl1271_debug(DEBUG_PSM, "chip to elp");
wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
- wl->elp = true;
+ set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
out:
mutex_unlock(&wl->mutex);
@@ -55,7 +56,7 @@ out:
/* Routines to toggle sleep mode while in ELP */
void wl1271_ps_elp_sleep(struct wl1271 *wl)
{
- if (wl->psm) {
+ if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
cancel_delayed_work(&wl->elp_work);
ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
msecs_to_jiffies(ELP_ENTRY_DELAY));
@@ -70,7 +71,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
u32 start_time = jiffies;
bool pending = false;
- if (!wl->elp)
+ if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
return 0;
wl1271_debug(DEBUG_PSM, "waking up chip from elp");
@@ -101,7 +102,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
}
}
- wl->elp = false;
+ clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
jiffies_to_msecs(jiffies - start_time));
@@ -143,7 +144,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
if (ret < 0)
return ret;
- wl->psm = 1;
+ set_bit(WL1271_FLAG_PSM, &wl->flags);
break;
case STATION_ACTIVE_MODE:
default:
@@ -166,7 +167,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
if (ret < 0)
return ret;
- wl->psm = 0;
+ clear_bit(WL1271_FLAG_PSM, &wl->flags);
break;
}
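
The flag conversions in this and the preceding files all follow one pattern: every former bool/int member of struct wl1271 becomes one bit in a single unsigned long wl->flags, manipulated with the atomic bitops so different contexts can flip unrelated flags without sharing a lock. A sketch, assuming the WL1271_FLAG_* constants are bit indices declared in wl1271.h (the ordering shown is illustrative):

    enum wl1271_flags {
            WL1271_FLAG_STA_RATES_CHANGED,
            WL1271_FLAG_STA_ASSOCIATED,
            WL1271_FLAG_JOINED,
            WL1271_FLAG_GPIO_POWER,
            WL1271_FLAG_TX_QUEUE_STOPPED,
            WL1271_FLAG_SCANNING,
            WL1271_FLAG_IN_ELP,
            WL1271_FLAG_PSM,
            WL1271_FLAG_PSM_REQUESTED,
    };

    /* typical use: set_bit()/clear_bit()/test_bit() on &wl->flags */
    static void example_set_psm(struct wl1271 *wl, bool on)
    {
            if (on)
                    set_bit(WL1271_FLAG_PSM, &wl->flags);
            else
                    clear_bit(WL1271_FLAG_PSM, &wl->flags);
    }
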
diff --git a/drivers/net/wireless/wl12xx/wl1271_reg.h b/drivers/net/wireless/wl12xx/wl1271_reg.h
index 1f237389d1c7..990960771528 100644
--- a/drivers/net/wireless/wl12xx/wl1271_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1271_reg.h
@@ -62,73 +62,10 @@
#define WL1271_SLV_REG_DATA (REGISTERS_BASE + 0x0008)
#define WL1271_SLV_REG_ADATA (REGISTERS_BASE + 0x000c)
#define WL1271_SLV_MEM_DATA (REGISTERS_BASE + 0x0018)
-/*
- * Interrupt registers.
- * 64 bit interrupt sources registers ws ced.
- * sme interupts were removed and new ones were added.
- * Order was changed.
- */
-#define FIQ_MASK (REGISTERS_BASE + 0x0400)
-#define FIQ_MASK_L (REGISTERS_BASE + 0x0400)
-#define FIQ_MASK_H (REGISTERS_BASE + 0x0404)
-#define FIQ_MASK_SET (REGISTERS_BASE + 0x0408)
-#define FIQ_MASK_SET_L (REGISTERS_BASE + 0x0408)
-#define FIQ_MASK_SET_H (REGISTERS_BASE + 0x040C)
-#define FIQ_MASK_CLR (REGISTERS_BASE + 0x0410)
-#define FIQ_MASK_CLR_L (REGISTERS_BASE + 0x0410)
-#define FIQ_MASK_CLR_H (REGISTERS_BASE + 0x0414)
-#define IRQ_MASK (REGISTERS_BASE + 0x0418)
-#define IRQ_MASK_L (REGISTERS_BASE + 0x0418)
-#define IRQ_MASK_H (REGISTERS_BASE + 0x041C)
-#define IRQ_MASK_SET (REGISTERS_BASE + 0x0420)
-#define IRQ_MASK_SET_L (REGISTERS_BASE + 0x0420)
-#define IRQ_MASK_SET_H (REGISTERS_BASE + 0x0424)
-#define IRQ_MASK_CLR (REGISTERS_BASE + 0x0428)
-#define IRQ_MASK_CLR_L (REGISTERS_BASE + 0x0428)
-#define IRQ_MASK_CLR_H (REGISTERS_BASE + 0x042C)
-#define ECPU_MASK (REGISTERS_BASE + 0x0448)
-#define FIQ_STS_L (REGISTERS_BASE + 0x044C)
-#define FIQ_STS_H (REGISTERS_BASE + 0x0450)
-#define IRQ_STS_L (REGISTERS_BASE + 0x0454)
-#define IRQ_STS_H (REGISTERS_BASE + 0x0458)
-#define INT_STS_ND (REGISTERS_BASE + 0x0464)
-#define INT_STS_RAW_L (REGISTERS_BASE + 0x0464)
-#define INT_STS_RAW_H (REGISTERS_BASE + 0x0468)
-#define INT_STS_CLR (REGISTERS_BASE + 0x04B4)
-#define INT_STS_CLR_L (REGISTERS_BASE + 0x04B4)
-#define INT_STS_CLR_H (REGISTERS_BASE + 0x04B8)
-#define INT_ACK (REGISTERS_BASE + 0x046C)
-#define INT_ACK_L (REGISTERS_BASE + 0x046C)
-#define INT_ACK_H (REGISTERS_BASE + 0x0470)
-#define INT_TRIG (REGISTERS_BASE + 0x0474)
-#define INT_TRIG_L (REGISTERS_BASE + 0x0474)
-#define INT_TRIG_H (REGISTERS_BASE + 0x0478)
-#define HOST_STS_L (REGISTERS_BASE + 0x045C)
-#define HOST_STS_H (REGISTERS_BASE + 0x0460)
-#define HOST_MASK (REGISTERS_BASE + 0x0430)
-#define HOST_MASK_L (REGISTERS_BASE + 0x0430)
-#define HOST_MASK_H (REGISTERS_BASE + 0x0434)
-#define HOST_MASK_SET (REGISTERS_BASE + 0x0438)
-#define HOST_MASK_SET_L (REGISTERS_BASE + 0x0438)
-#define HOST_MASK_SET_H (REGISTERS_BASE + 0x043C)
-#define HOST_MASK_CLR (REGISTERS_BASE + 0x0440)
-#define HOST_MASK_CLR_L (REGISTERS_BASE + 0x0440)
-#define HOST_MASK_CLR_H (REGISTERS_BASE + 0x0444)
#define ACX_REG_INTERRUPT_TRIG (REGISTERS_BASE + 0x0474)
#define ACX_REG_INTERRUPT_TRIG_H (REGISTERS_BASE + 0x0478)
-/* Host Interrupts*/
-#define HINT_MASK (REGISTERS_BASE + 0x0494)
-#define HINT_MASK_SET (REGISTERS_BASE + 0x0498)
-#define HINT_MASK_CLR (REGISTERS_BASE + 0x049C)
-#define HINT_STS_ND_MASKED (REGISTERS_BASE + 0x04A0)
-/*1150 spec calls this HINT_STS_RAW*/
-#define HINT_STS_ND (REGISTERS_BASE + 0x04B0)
-#define HINT_STS_CLR (REGISTERS_BASE + 0x04A4)
-#define HINT_ACK (REGISTERS_BASE + 0x04A8)
-#define HINT_TRIG (REGISTERS_BASE + 0x04AC)
-
/*=============================================
Host Interrupt Mask Register - 32bit (RW)
------------------------------------------
@@ -433,16 +370,6 @@
/*===============================================
- Phy regs
- ===============================================*/
-#define ACX_PHY_ADDR_REG SBB_ADDR
-#define ACX_PHY_DATA_REG SBB_DATA
-#define ACX_PHY_CTRL_REG SBB_CTL
-#define ACX_PHY_REG_WR_MASK 0x00000001ul
-#define ACX_PHY_REG_RD_MASK 0x00000002ul
-
-
-/*===============================================
EEPROM Read/Write Request 32bit RW
------------------------------------------
1 EE_READ - EEPROM Read Request 1 - Setting this bit
@@ -511,28 +438,6 @@
#define ACX_CONT_WIND_MIN_MASK 0x0000007f
#define ACX_CONT_WIND_MAX 0x03ff0000
-/*
- * Indirect slave register/memory registers
- * ----------------------------------------
- */
-#define HW_SLAVE_REG_ADDR_REG 0x00000004
-#define HW_SLAVE_REG_DATA_REG 0x00000008
-#define HW_SLAVE_REG_CTRL_REG 0x0000000c
-
-#define SLAVE_AUTO_INC 0x00010000
-#define SLAVE_NO_AUTO_INC 0x00000000
-#define SLAVE_HOST_LITTLE_ENDIAN 0x00000000
-
-#define HW_SLAVE_MEM_ADDR_REG SLV_MEM_ADDR
-#define HW_SLAVE_MEM_DATA_REG SLV_MEM_DATA
-#define HW_SLAVE_MEM_CTRL_REG SLV_MEM_CTL
-#define HW_SLAVE_MEM_ENDIAN_REG SLV_END_CTL
-
-#define HW_FUNC_EVENT_INT_EN 0x8000
-#define HW_FUNC_EVENT_MASK_REG 0x00000034
-
-#define ACX_MAC_TIMESTAMP_REG (MAC_TIMESTAMP)
-
/*===============================================
HI_CFG Interface Configuration Register Values
------------------------------------------
@@ -647,10 +552,6 @@ b12-b0 - Supported Rate indicator bits as defined below.
******************************************************************************/
-#define TNETW1251_CHIP_ID_PG1_0 0x07010101
-#define TNETW1251_CHIP_ID_PG1_1 0x07020101
-#define TNETW1251_CHIP_ID_PG1_2 0x07030101
-
/*************************************************************************
Interrupt Trigger Register (Host -> WiLink)
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 02978a16e732..ee9564aa6ecc 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -397,8 +397,7 @@ u16 wl1271_top_reg_read(struct wl1271 *wl, int addr)
/* poll for data ready */
do {
val = wl1271_spi_read32(wl, OCP_DATA_READ);
- timeout--;
- } while (!(val & OCP_READY_MASK) && timeout);
+ } while (!(val & OCP_READY_MASK) && --timeout);
if (!timeout) {
wl1271_warning("Top register access timed out.");
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index 00af065c77c2..a288cc317d7b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -121,6 +121,11 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
pad = pad - skb->len;
tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
+ /* if the packets are destined for AP (have a STA entry) send them
+ with AP rate policies, otherwise use default basic rates */
+ if (control->control.sta)
+ tx_attr |= ACX_TX_AP_FULL_RATE << TX_HW_ATTR_OFST_RATE_POLICY;
+
desc->tx_attr = cpu_to_le16(tx_attr);
wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
@@ -214,18 +219,50 @@ static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb)
return ret;
}
+static u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
+{
+ struct ieee80211_supported_band *band;
+ u32 enabled_rates = 0;
+ int bit;
+
+ band = wl->hw->wiphy->bands[wl->band];
+ for (bit = 0; bit < band->n_bitrates; bit++) {
+ if (rate_set & 0x1)
+ enabled_rates |= band->bitrates[bit].hw_value;
+ rate_set >>= 1;
+ }
+
+ return enabled_rates;
+}
+
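
As a concrete illustration of the helper above: if the associated STA advertises only the three lowest 2.4 GHz rates, the per-band bit positions from mac80211 are translated into the firmware's CONF_HW_BIT_RATE_* mask (the exact hw_value assignments come from wl1271_rates[], so the result below is indicative rather than exact):

    /* bits 0..2 of the mac80211 rate bitmap: 1, 2 and 5.5 Mbps */
    u32 sta_rates = BIT(0) | BIT(1) | BIT(2);
    u32 hw_rates  = wl1271_tx_enabled_rates_get(wl, sta_rates);

    /* hw_rates now holds something like CONF_HW_BIT_RATE_1MBPS |
     * CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS, which is
     * stored in wl->rate_set and programmed via wl1271_acx_rate_policies() */
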
void wl1271_tx_work(struct work_struct *work)
{
struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
struct sk_buff *skb;
bool woken_up = false;
+ u32 sta_rates = 0;
int ret;
+ /* check if the rates supported by the AP have changed */
+ if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED,
+ &wl->flags))) {
+ unsigned long flags;
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ sta_rates = wl->sta_rate_set;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ }
+
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
+ /* if rates have changed, re-configure the rate policy */
+ if (unlikely(sta_rates)) {
+ wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
+ wl1271_acx_rate_policies(wl);
+ }
+
while ((skb = skb_dequeue(&wl->tx_queue))) {
if (!woken_up) {
ret = wl1271_ps_elp_wakeup(wl, false);
@@ -240,18 +277,18 @@ void wl1271_tx_work(struct work_struct *work)
wl1271_debug(DEBUG_TX, "tx_work: fw buffer full, "
"stop queues");
ieee80211_stop_queues(wl->hw);
- wl->tx_queue_stopped = true;
+ set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
skb_queue_head(&wl->tx_queue, skb);
goto out;
} else if (ret < 0) {
dev_kfree_skb(skb);
goto out;
- } else if (wl->tx_queue_stopped) {
+ } else if (test_and_clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED,
+ &wl->flags)) {
/* firmware buffer has space, restart queues */
wl1271_debug(DEBUG_TX,
"complete_packet: waking queues");
ieee80211_wake_queues(wl->hw);
- wl->tx_queue_stopped = false;
}
}
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 33c8be7ec8e6..5d2b52f4717f 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -879,16 +879,15 @@ static void zd1201_set_multicast(struct net_device *dev)
unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI];
int i;
- if (dev->mc_count > ZD1201_MAXMULTI)
+ if (netdev_mc_count(dev) > ZD1201_MAXMULTI)
return;
- for (i=0; i<dev->mc_count; i++) {
+ for (i=0; i<netdev_mc_count(dev); i++) {
memcpy(reqbuf+i*ETH_ALEN, mc->dmi_addr, ETH_ALEN);
mc = mc->next;
}
zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf,
- dev->mc_count*ETH_ALEN, 0);
-
+ netdev_mc_count(dev) * ETH_ALEN, 0);
}
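
The same walk can also be written with the iterator that accompanies netdev_mc_count(); a hedged equivalent of the loop above (at this point in the API transition the entries are still struct dev_mc_list with a dmi_addr member):

    struct dev_mc_list *mc;
    int n = 0;

    /* equivalent to the open-coded mc = mc->next walk */
    netdev_for_each_mc_addr(mc, dev)
            memcpy(reqbuf + n++ * ETH_ALEN, mc->dmi_addr, ETH_ALEN);
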
static int zd1201_config_commit(struct net_device *dev,
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index f14deb0c8514..2d555cc30508 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -869,7 +869,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
}
static int zd_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct zd_mac *mac = zd_hw_mac(hw);
@@ -877,22 +877,22 @@ static int zd_op_add_interface(struct ieee80211_hw *hw,
if (mac->type != NL80211_IFTYPE_UNSPECIFIED)
return -EOPNOTSUPP;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
- mac->type = conf->type;
+ mac->type = vif->type;
break;
default:
return -EOPNOTSUPP;
}
- return zd_write_mac_addr(&mac->chip, conf->mac_addr);
+ return zd_write_mac_addr(&mac->chip, vif->addr);
}
static void zd_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct zd_mac *mac = zd_hw_mac(hw);
mac->type = NL80211_IFTYPE_UNSPECIFIED;
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 72d3e437e190..442fc1117326 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1079,11 +1079,15 @@ static int eject_installer(struct usb_interface *intf)
int r;
/* Find bulk out endpoint */
- endpoint = &iface_desc->endpoint[1].desc;
- if (usb_endpoint_dir_out(endpoint) &&
- usb_endpoint_xfer_bulk(endpoint)) {
- bulk_out_ep = endpoint->bEndpointAddress;
- } else {
+ for (r = 1; r >= 0; r--) {
+ endpoint = &iface_desc->endpoint[r].desc;
+ if (usb_endpoint_dir_out(endpoint) &&
+ usb_endpoint_xfer_bulk(endpoint)) {
+ bulk_out_ep = endpoint->bEndpointAddress;
+ break;
+ }
+ }
+ if (r == -1) {
dev_err(&udev->dev,
"zd1211rw: Could not find bulk out endpoint\n");
return -ENODEV;
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 8c777ba4e2b3..1a74594224b1 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -22,11 +22,17 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
#define DRIVER_NAME "xilinx_emaclite"
/* Register offsets for the EmacLite Core */
#define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */
+#define XEL_MDIOADDR_OFFSET 0x07E4 /* MDIO Address Register */
+#define XEL_MDIOWR_OFFSET 0x07E8 /* MDIO Write Data Register */
+#define XEL_MDIORD_OFFSET 0x07EC /* MDIO Read Data Register */
+#define XEL_MDIOCTRL_OFFSET 0x07F0 /* MDIO Control Register */
#define XEL_GIER_OFFSET 0x07F8 /* GIE Register */
#define XEL_TSR_OFFSET 0x07FC /* Tx status */
#define XEL_TPLR_OFFSET 0x07F4 /* Tx packet length */
@@ -37,6 +43,22 @@
#define XEL_BUFFER_OFFSET 0x0800 /* Next Tx/Rx buffer's offset */
+/* MDIO Address Register Bit Masks */
+#define XEL_MDIOADDR_REGADR_MASK 0x0000001F /* Register Address */
+#define XEL_MDIOADDR_PHYADR_MASK 0x000003E0 /* PHY Address */
+#define XEL_MDIOADDR_PHYADR_SHIFT 5
+#define XEL_MDIOADDR_OP_MASK 0x00000400 /* RD/WR Operation */
+
+/* MDIO Write Data Register Bit Masks */
+#define XEL_MDIOWR_WRDATA_MASK 0x0000FFFF /* Data to be Written */
+
+/* MDIO Read Data Register Bit Masks */
+#define XEL_MDIORD_RDDATA_MASK 0x0000FFFF /* Data to be Read */
+
+/* MDIO Control Register Bit Masks */
+#define XEL_MDIOCTRL_MDIOSTS_MASK 0x00000001 /* MDIO Status Mask */
+#define XEL_MDIOCTRL_MDIOEN_MASK 0x00000008 /* MDIO Enable */
+
/* Global Interrupt Enable Register (GIER) Bit Masks */
#define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */
@@ -87,6 +109,12 @@
* @reset_lock: lock used for synchronization
* @deferred_skb: holds an skb (for transmission at a later time) when the
* Tx buffer is not free
+ * @phy_dev: pointer to the PHY device
+ * @phy_node: pointer to the PHY device node
+ * @mii_bus: pointer to the MII bus
+ * @mdio_irqs: IRQs table for MDIO bus
+ * @last_link: last link status
+ * @has_mdio: indicates whether MDIO is included in the HW
*/
struct net_local {
@@ -100,6 +128,15 @@ struct net_local {
spinlock_t reset_lock;
struct sk_buff *deferred_skb;
+
+ struct phy_device *phy_dev;
+ struct device_node *phy_node;
+
+ struct mii_bus *mii_bus;
+ int mdio_irqs[PHY_MAX_ADDR];
+
+ int last_link;
+ bool has_mdio;
};
@@ -431,7 +468,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
}
/**
- * xemaclite_set_mac_address - Set the MAC address for this device
+ * xemaclite_update_address - Update the MAC address in the device
* @drvdata: Pointer to the Emaclite device private data
* @address_ptr:Pointer to the MAC address (MAC address is a 48-bit value)
*
@@ -441,8 +478,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
* The MAC address can be programmed using any of the two transmit
* buffers (if configured).
*/
-static void xemaclite_set_mac_address(struct net_local *drvdata,
- u8 *address_ptr)
+static void xemaclite_update_address(struct net_local *drvdata,
+ u8 *address_ptr)
{
void __iomem *addr;
u32 reg_data;
@@ -465,6 +502,30 @@ static void xemaclite_set_mac_address(struct net_local *drvdata,
}
/**
+ * xemaclite_set_mac_address - Set the MAC address for this device
+ * @dev: Pointer to the network device instance
+ * @addr: Void pointer to the sockaddr structure
+ *
+ * This function copies the HW address from the sockaddr structure to the
+ * net_device structure and updates the address in HW.
+ *
+ * Return: Error if the net device is busy or 0 if the addr is set
+ * successfully
+ */
+static int xemaclite_set_mac_address(struct net_device *dev, void *address)
+{
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct sockaddr *addr = address;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ xemaclite_update_address(lp, dev->dev_addr);
+ return 0;
+}
+
+/**
* xemaclite_tx_timeout - Callback for Tx Timeout
* @dev: Pointer to the network device
*
@@ -641,12 +702,219 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+/**********************/
+/* MDIO Bus functions */
+/**********************/
+
+/**
+ * xemaclite_mdio_wait - Wait for the MDIO to be ready to use
+ * @lp: Pointer to the Emaclite device private data
+ *
+ * This function waits until the device is ready to accept a new MDIO
+ * request.
+ *
+ * Return: 0 for success or -ETIMEDOUT for a timeout
+ */
+
+static int xemaclite_mdio_wait(struct net_local *lp)
+{
+ long end = jiffies + 2;
+
+ /* wait until the MDIO interface is no longer busy, or give up
+ after a short timeout.
+ */
+ while (in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
+ XEL_MDIOCTRL_MDIOSTS_MASK) {
+ if (end - jiffies <= 0) {
+ WARN_ON(1);
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+ return 0;
+}
+
+/**
+ * xemaclite_mdio_read - Read from a given MII management register
+ * @bus: the mii_bus struct
+ * @phy_id: the phy address
+ * @reg: register number to read from
+ *
+ * This function waits until the device is ready to accept a new MDIO
+ * request and then writes the phy address to the MDIO Address register
+ * and reads the data from the MDIO Read Data register when it is available.
+ *
+ * Return: Value read from the MII management register, or -ETIMEDOUT on timeout
+ */
+static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct net_local *lp = bus->priv;
+ u32 ctrl_reg;
+ u32 rc;
+
+ if (xemaclite_mdio_wait(lp))
+ return -ETIMEDOUT;
+
+ /* Write the PHY address, register number and set the OP bit in the
+ * MDIO Address register. Set the Status bit in the MDIO Control
+ * register to start an MDIO read transaction.
+ */
+ ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET,
+ XEL_MDIOADDR_OP_MASK |
+ ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg));
+ out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
+ ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK);
+
+ if (xemaclite_mdio_wait(lp))
+ return -ETIMEDOUT;
+
+ rc = in_be32(lp->base_addr + XEL_MDIORD_OFFSET);
+
+ dev_dbg(&lp->ndev->dev,
+ "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
+ phy_id, reg, rc);
+
+ return rc;
+}
+
+/**
+ * xemaclite_mdio_write - Write to a given MII management register
+ * @bus: the mii_bus struct
+ * @phy_id: the phy address
+ * @reg: register number to write to
+ * @val: value to write to the register number specified by reg
+ *
+ * This function waits until the device is ready to accept a new MDIO
+ * request and then writes the val to the MDIO Write Data register.
+ */
+static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
+ u16 val)
+{
+ struct net_local *lp = bus->priv;
+ u32 ctrl_reg;
+
+ dev_dbg(&lp->ndev->dev,
+ "xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
+ phy_id, reg, val);
+
+ if (xemaclite_mdio_wait(lp))
+ return -ETIMEDOUT;
+
+ /* Write the PHY address, register number and clear the OP bit in the
+ * MDIO Address register and then write the value into the MDIO Write
+ * Data register. Finally, set the Status bit in the MDIO Control
+ * register to start an MDIO write transaction.
+ */
+ ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET,
+ ~XEL_MDIOADDR_OP_MASK &
+ ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg));
+ out_be32(lp->base_addr + XEL_MDIOWR_OFFSET, val);
+ out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
+ ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK);
+
+ return 0;
+}
+
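
Once the bus is registered, PHY access normally goes through phylib rather than these helpers directly; for instance, the restart-autonegotiation sequence in xemaclite_open() further down reduces to a read-modify-write that ends up in xemaclite_mdio_read()/xemaclite_mdio_write() via the mii_bus:

    int bmcr = phy_read(lp->phy_dev, MII_BMCR);

    if (bmcr >= 0)
            phy_write(lp->phy_dev, MII_BMCR,
                      bmcr | BMCR_ANENABLE | BMCR_ANRESTART);
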
+/**
+ * xemaclite_mdio_reset - Reset the mdio bus.
+ * @bus: Pointer to the MII bus
+ *
+ * This function is required as per Documentation/networking/phy.txt.
+ * There is no reset in this device; this function always returns 0.
+ */
+static int xemaclite_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+/**
+ * xemaclite_mdio_setup - Register mii_bus for the Emaclite device
+ * @lp: Pointer to the Emaclite device private data
+ * @ofdev: Pointer to OF device structure
+ *
+ * This function enables MDIO bus in the Emaclite device and registers a
+ * mii_bus.
+ *
+ * Return: 0 upon success or a negative error upon failure
+ */
+static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
+{
+ struct mii_bus *bus;
+ int rc;
+ struct resource res;
+ struct device_node *np = of_get_parent(lp->phy_node);
+
+ /* Don't register the MDIO bus if the phy_node or its parent node
+ * can't be found.
+ */
+ if (!np)
+ return -ENODEV;
+
+ /* Enable the MDIO bus by asserting the enable bit in MDIO Control
+ * register.
+ */
+ out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
+ XEL_MDIOCTRL_MDIOEN_MASK);
+
+ bus = mdiobus_alloc();
+ if (!bus)
+ return -ENOMEM;
+
+ of_address_to_resource(np, 0, &res);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
+ (unsigned long long)res.start);
+ bus->priv = lp;
+ bus->name = "Xilinx Emaclite MDIO";
+ bus->read = xemaclite_mdio_read;
+ bus->write = xemaclite_mdio_write;
+ bus->reset = xemaclite_mdio_reset;
+ bus->parent = dev;
+ bus->irq = lp->mdio_irqs; /* preallocated IRQ table */
+
+ lp->mii_bus = bus;
+
+ rc = of_mdiobus_register(bus, np);
+ if (rc)
+ goto err_register;
+
+ return 0;
+
+err_register:
+ mdiobus_free(bus);
+ return rc;
+}
+
+/**
+ * xemaclite_adjust_link - Link state callback for the Emaclite device
+ * @ndev: pointer to net_device struct
+ *
+ * There's nothing in the Emaclite device to be configured when the link
+ * state changes. We just print the status.
+ */
+void xemaclite_adjust_link(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phy = lp->phy_dev;
+ int link_state;
+
+ /* hash together the state values to decide if something has changed */
+ link_state = phy->speed | (phy->duplex << 1) | phy->link;
+
+ if (lp->last_link != link_state) {
+ lp->last_link = link_state;
+ phy_print_status(phy);
+ }
+}
+
/**
* xemaclite_open - Open the network device
* @dev: Pointer to the network device
*
* This function sets the MAC address, requests an IRQ and enables interrupts
* for the Emaclite device and starts the Tx queue.
+ * It also connects to the phy device, if MDIO is included in Emaclite device.
*/
static int xemaclite_open(struct net_device *dev)
{
@@ -656,14 +924,47 @@ static int xemaclite_open(struct net_device *dev)
/* Just to be safe, stop the device first */
xemaclite_disable_interrupts(lp);
+ if (lp->phy_node) {
+ u32 bmcr;
+
+ lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+ xemaclite_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+ if (!lp->phy_dev) {
+ dev_err(&lp->ndev->dev, "of_phy_connect() failed\n");
+ return -ENODEV;
+ }
+
+ /* EmacLite doesn't support giga-bit speeds */
+ lp->phy_dev->supported &= (PHY_BASIC_FEATURES);
+ lp->phy_dev->advertising = lp->phy_dev->supported;
+
+ /* Don't advertise 1000BASE-T Full/Half duplex speeds */
+ phy_write(lp->phy_dev, MII_CTRL1000, 0);
+
+ /* Advertise only 10 and 100 Mbps full/half duplex speeds */
+ phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL);
+
+ /* Restart auto negotiation */
+ bmcr = phy_read(lp->phy_dev, MII_BMCR);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ phy_write(lp->phy_dev, MII_BMCR, bmcr);
+
+ phy_start(lp->phy_dev);
+ }
+
/* Set the MAC address each time opened */
- xemaclite_set_mac_address(lp, dev->dev_addr);
+ xemaclite_update_address(lp, dev->dev_addr);
/* Grab the IRQ */
retval = request_irq(dev->irq, xemaclite_interrupt, 0, dev->name, dev);
if (retval) {
dev_err(&lp->ndev->dev, "Could not allocate interrupt %d\n",
dev->irq);
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ lp->phy_dev = NULL;
+
return retval;
}
@@ -682,6 +983,7 @@ static int xemaclite_open(struct net_device *dev)
*
* This function stops the Tx queue, disables interrupts and frees the IRQ for
* the Emaclite device.
+ * It also disconnects the phy device associated with the Emaclite device.
*/
static int xemaclite_close(struct net_device *dev)
{
@@ -691,6 +993,10 @@ static int xemaclite_close(struct net_device *dev)
xemaclite_disable_interrupts(lp);
free_irq(dev->irq, dev);
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ lp->phy_dev = NULL;
+
return 0;
}
@@ -754,42 +1060,6 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
}
/**
- * xemaclite_ioctl - Perform IO Control operations on the network device
- * @dev: Pointer to the network device
- * @rq: Pointer to the interface request structure
- * @cmd: IOCTL command
- *
- * The only IOCTL operation supported by this function is setting the MAC
- * address. An error is reported if any other operations are requested.
- *
- * Return: 0 to indicate success, or a negative error for failure.
- */
-static int xemaclite_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
- struct hw_addr_data *hw_addr = (struct hw_addr_data *) &rq->ifr_hwaddr;
-
- switch (cmd) {
- case SIOCETHTOOL:
- return -EIO;
-
- case SIOCSIFHWADDR:
- dev_err(&lp->ndev->dev, "SIOCSIFHWADDR\n");
-
- /* Copy MAC address in from user space */
- copy_from_user((void __force *) dev->dev_addr,
- (void __user __force *) hw_addr,
- IFHWADDRLEN);
- xemaclite_set_mac_address(lp, dev->dev_addr);
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-/**
* xemaclite_remove_ndev - Free the network device
* @ndev: Pointer to the network device to be freed
*
@@ -840,6 +1110,8 @@ static struct net_device_ops xemaclite_netdev_ops;
* This function probes for the Emaclite device in the device tree.
* It initializes the driver data structure and the hardware, sets the MAC
* address and registers the network device.
+ * It also registers a mii_bus for the Emaclite device, if MDIO is included
+ * in the device.
*
* Return: 0, if the driver is bound to the Emaclite device, or
* a negative error if there is failure.
@@ -880,6 +1152,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
}
dev_set_drvdata(dev, ndev);
+ SET_NETDEV_DEV(ndev, &ofdev->dev);
ndev->irq = r_irq.start;
ndev->mem_start = r_mem.start;
@@ -923,13 +1196,14 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
out_be32(lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET, 0);
/* Set the MAC address in the EmacLite device */
- xemaclite_set_mac_address(lp, ndev->dev_addr);
+ xemaclite_update_address(lp, ndev->dev_addr);
- dev_info(dev,
- "MAC address is now %2x:%2x:%2x:%2x:%2x:%2x\n",
- ndev->dev_addr[0], ndev->dev_addr[1],
- ndev->dev_addr[2], ndev->dev_addr[3],
- ndev->dev_addr[4], ndev->dev_addr[5]);
+ lp->phy_node = of_parse_phandle(ofdev->node, "phy-handle", 0);
+ rc = xemaclite_mdio_setup(lp, &ofdev->dev);
+ if (rc)
+ dev_warn(&ofdev->dev, "error registering MDIO bus\n");
+
+ dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
ndev->netdev_ops = &xemaclite_netdev_ops;
ndev->flags &= ~IFF_MULTICAST;
@@ -972,12 +1246,25 @@ static int __devexit xemaclite_of_remove(struct of_device *of_dev)
struct device *dev = &of_dev->dev;
struct net_device *ndev = dev_get_drvdata(dev);
+ struct net_local *lp = (struct net_local *) netdev_priv(ndev);
+
+ /* Un-register the mii_bus, if configured */
+ if (lp->has_mdio) {
+ mdiobus_unregister(lp->mii_bus);
+ kfree(lp->mii_bus->irq);
+ mdiobus_free(lp->mii_bus);
+ lp->mii_bus = NULL;
+ }
+
unregister_netdev(ndev);
+ if (lp->phy_node)
+ of_node_put(lp->phy_node);
+ lp->phy_node = NULL;
+
release_mem_region(ndev->mem_start, ndev->mem_end-ndev->mem_start + 1);
xemaclite_remove_ndev(ndev);
-
dev_set_drvdata(dev, NULL);
return 0;
@@ -987,7 +1274,7 @@ static struct net_device_ops xemaclite_netdev_ops = {
.ndo_open = xemaclite_open,
.ndo_stop = xemaclite_close,
.ndo_start_xmit = xemaclite_send,
- .ndo_do_ioctl = xemaclite_ioctl,
+ .ndo_set_mac_address = xemaclite_set_mac_address,
.ndo_tx_timeout = xemaclite_tx_timeout,
.ndo_get_stats = xemaclite_get_stats,
};
@@ -999,6 +1286,7 @@ static struct of_device_id xemaclite_of_match[] __devinitdata = {
{ .compatible = "xlnx,xps-ethernetlite-1.00.a", },
{ .compatible = "xlnx,xps-ethernetlite-2.00.a", },
{ .compatible = "xlnx,xps-ethernetlite-2.01.a", },
+ { .compatible = "xlnx,xps-ethernetlite-3.00.a", },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xemaclite_of_match);
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 0f773a9a3ff2..5c880240a642 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -237,7 +237,7 @@ static const struct pci_id_info pci_id_tbl[] = {
{ }
};
-static const struct pci_device_id yellowfin_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(yellowfin_pci_tbl) = {
{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ }
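
DEFINE_PCI_DEVICE_TABLE() keeps the ID table const and, in kernels of this vintage, is believed to expand roughly to the following, so the table can be dropped when hotplug support is not built in:

    #define DEFINE_PCI_DEVICE_TABLE(_table) \
            const struct pci_device_id _table[] __devinitconst
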
@@ -1301,15 +1301,16 @@ static void set_rx_mode(struct net_device *dev)
iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
iowrite16(0x000F, ioaddr + AddrMode);
- } else if ((dev->mc_count > 64) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > 64) ||
+ (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter well, or accept all multicasts. */
iowrite16(0x000B, ioaddr + AddrMode);
- } else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
+ } else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
struct dev_mc_list *mclist;
u16 hash_table[4];
int i;
memset(hash_table, 0, sizeof(hash_table));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
unsigned int bit;
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b2a448e19fe6..3e5ab2bf6a5c 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -706,6 +706,21 @@ irqreturn_t pci_sriov_migration(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_sriov_migration);
+/**
+ * pci_num_vf - return number of VFs associated with a PF
+ * @dev: the PCI device
+ *
+ * Returns number of VFs, or 0 if SR-IOV is not enabled.
+ */
+int pci_num_vf(struct pci_dev *dev)
+{
+ if (!dev || !dev->is_physfn)
+ return 0;
+ else
+ return dev->sriov->nr_virtfn;
+}
+EXPORT_SYMBOL_GPL(pci_num_vf);
+
static int ats_alloc_one(struct pci_dev *dev, int ps)
{
int pos;
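For reference only (not part of the patch): a minimal sketch of how a PF driver might use the pci_num_vf() helper added above. It assumes pci_enable_sriov() has already been called elsewhere and that the declaration sits next to the other SR-IOV helpers in linux/pci.h.

#include <linux/pci.h>

/* Illustrative only -- not part of this patch. */
static void example_report_vfs(struct pci_dev *pdev)
{
	int nr_vf = pci_num_vf(pdev);	/* 0 when SR-IOV is not enabled */

	if (nr_vf)
		dev_info(&pdev->dev, "%d VFs currently enabled\n", nr_vf);
	else
		dev_info(&pdev->dev, "SR-IOV is not enabled\n");
}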
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index b232693378cd..a3ac4456e0b1 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -649,6 +649,7 @@ struct qeth_card_options {
int performance_stats;
int rx_sg_cb;
enum qeth_ipa_isolation_modes isolation;
+ int sniffer;
};
/*
@@ -737,6 +738,7 @@ struct qeth_card {
struct qeth_discipline discipline;
atomic_t force_alloc_skb;
struct service_level qeth_service_level;
+ struct qdio_ssqd_desc ssqd;
};
struct qeth_card_list_struct {
@@ -811,7 +813,8 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
enum qeth_ipa_cmds, enum qeth_prot_versions);
int qeth_query_setadapterparms(struct qeth_card *);
-int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, const char *);
+int qeth_check_qdio_errors(struct qeth_card *, struct qdio_buffer *,
+ unsigned int, const char *);
void qeth_queue_input_buffer(struct qeth_card *, int);
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
struct qdio_buffer *, struct qdio_buffer_element **, int *,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index d34804d5ece1..fa8a519218ac 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -269,6 +269,7 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
card->qdio.init_pool.buf_count = bufcnt;
return qeth_alloc_buffer_pool(card);
}
+EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
static int qeth_issue_next_read(struct qeth_card *card)
{
@@ -350,8 +351,10 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
if (IS_IPA(iob->data)) {
cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
if (IS_IPA_REPLY(cmd)) {
- if (cmd->hdr.command < IPA_CMD_SETCCID ||
- cmd->hdr.command > IPA_CMD_MODCCID)
+ if (cmd->hdr.command != IPA_CMD_SETCCID &&
+ cmd->hdr.command != IPA_CMD_DELCCID &&
+ cmd->hdr.command != IPA_CMD_MODCCID &&
+ cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
qeth_issue_ipa_msg(cmd,
cmd->hdr.return_code, card);
return cmd;
@@ -1100,11 +1103,6 @@ static int qeth_setup_card(struct qeth_card *card)
card->thread_running_mask = 0;
INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
INIT_LIST_HEAD(&card->ip_list);
- card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
- if (!card->ip_tbd_list) {
- QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
- return -ENOMEM;
- }
INIT_LIST_HEAD(card->ip_tbd_list);
INIT_LIST_HEAD(&card->cmd_waiter_list);
init_waitqueue_head(&card->wait_q);
@@ -1138,21 +1136,30 @@ static struct qeth_card *qeth_alloc_card(void)
QETH_DBF_TEXT(SETUP, 2, "alloccrd");
card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL);
if (!card)
- return NULL;
+ goto out;
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
- if (qeth_setup_channel(&card->read)) {
- kfree(card);
- return NULL;
- }
- if (qeth_setup_channel(&card->write)) {
- qeth_clean_channel(&card->read);
- kfree(card);
- return NULL;
+ card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (!card->ip_tbd_list) {
+ QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
+ goto out_card;
}
+ if (qeth_setup_channel(&card->read))
+ goto out_ip;
+ if (qeth_setup_channel(&card->write))
+ goto out_channel;
card->options.layer2 = -1;
card->qeth_service_level.seq_print = qeth_core_sl_print;
register_service_level(&card->qeth_service_level);
return card;
+
+out_channel:
+ qeth_clean_channel(&card->read);
+out_ip:
+ kfree(card->ip_tbd_list);
+out_card:
+ kfree(card);
+out:
+ return NULL;
}
static int qeth_determine_card_type(struct qeth_card *card)
@@ -1355,26 +1362,29 @@ static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
return ret;
}
-static int qeth_get_unitaddr(struct qeth_card *card)
+static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
{
- int length;
- char *prcd;
- int rc;
-
- QETH_DBF_TEXT(SETUP, 2, "getunit");
- rc = qeth_read_conf_data(card, (void **) &prcd, &length);
- if (rc) {
- QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
- dev_name(&card->gdev->dev), rc);
- return rc;
- }
+ QETH_DBF_TEXT(SETUP, 2, "cfgunit");
card->info.chpid = prcd[30];
card->info.unit_addr2 = prcd[31];
card->info.cula = prcd[63];
card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
(prcd[0x11] == _ascebc['M']));
- kfree(prcd);
- return 0;
+}
+
+static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
+{
+ QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
+
+ if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && prcd[76] == 0xF5) {
+ card->info.blkt.time_total = 250;
+ card->info.blkt.inter_packet = 5;
+ card->info.blkt.inter_packet_jumbo = 15;
+ } else {
+ card->info.blkt.time_total = 0;
+ card->info.blkt.inter_packet = 0;
+ card->info.blkt.inter_packet_jumbo = 0;
+ }
}
static void qeth_init_tokens(struct qeth_card *card)
@@ -2573,8 +2583,8 @@ int qeth_query_setadapterparms(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
-int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
- const char *dbftext)
+int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
+ unsigned int qdio_error, const char *dbftext)
{
if (qdio_error) {
QETH_DBF_TEXT(TRACE, 2, dbftext);
@@ -2584,7 +2594,11 @@ int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
QETH_DBF_TEXT_(QERR, 2, " F14=%02X",
buf->element[14].flags & 0xff);
QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error);
- return 1;
+ if ((buf->element[15].flags & 0xff) == 0x12) {
+ card->stats.rx_dropped++;
+ return 0;
+ } else
+ return 1;
}
return 0;
}
@@ -2667,7 +2681,7 @@ static int qeth_handle_send_error(struct qeth_card *card,
qdio_err = 1;
}
}
- qeth_check_qdio_errors(buffer->buffer, qdio_err, "qouterr");
+ qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
if (!qdio_err)
return QETH_SEND_ERROR_NONE;
@@ -3509,6 +3523,7 @@ void qeth_tx_timeout(struct net_device *dev)
{
struct qeth_card *card;
+ QETH_DBF_TEXT(TRACE, 4, "txtimeo");
card = dev->ml_priv;
card->stats.tx_errors++;
qeth_schedule_recovery(card);
@@ -3847,9 +3862,7 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev,
int qeth_core_hardsetup_card(struct qeth_card *card)
{
- struct qdio_ssqd_desc *ssqd;
int retries = 0;
- int mpno = 0;
int rc;
QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
@@ -3882,31 +3895,6 @@ retriable:
else
goto retry;
}
-
- rc = qeth_get_unitaddr(card);
- if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
- return rc;
- }
-
- ssqd = kmalloc(sizeof(struct qdio_ssqd_desc), GFP_KERNEL);
- if (!ssqd) {
- rc = -ENOMEM;
- goto out;
- }
- rc = qdio_get_ssqd_desc(CARD_DDEV(card), ssqd);
- if (rc == 0)
- mpno = ssqd->pcnt;
- kfree(ssqd);
-
- if (mpno)
- mpno = min(mpno - 1, QETH_MAX_PORTNO);
- if (card->info.portno > mpno) {
- QETH_DBF_MESSAGE(2, "Device %s does not offer port number %d"
- "\n.", CARD_BUS_ID(card), card->info.portno);
- rc = -ENODEV;
- goto out;
- }
qeth_init_tokens(card);
qeth_init_func_level(card);
rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
@@ -3990,7 +3978,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
struct qdio_buffer_element *element = *__element;
int offset = *__offset;
struct sk_buff *skb = NULL;
- int skb_len;
+ int skb_len = 0;
void *data_ptr;
int data_len;
int headroom = 0;
@@ -4009,20 +3997,24 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
*hdr = element->addr + offset;
offset += sizeof(struct qeth_hdr);
- if (card->options.layer2) {
- if (card->info.type == QETH_CARD_TYPE_OSN) {
- skb_len = (*hdr)->hdr.osn.pdu_length;
- headroom = sizeof(struct qeth_hdr);
- } else {
- skb_len = (*hdr)->hdr.l2.pkt_length;
- }
- } else {
+ switch ((*hdr)->hdr.l2.id) {
+ case QETH_HEADER_TYPE_LAYER2:
+ skb_len = (*hdr)->hdr.l2.pkt_length;
+ break;
+ case QETH_HEADER_TYPE_LAYER3:
skb_len = (*hdr)->hdr.l3.length;
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
(card->info.link_type == QETH_LINK_TYPE_HSTR))
headroom = TR_HLEN;
else
headroom = ETH_HLEN;
+ break;
+ case QETH_HEADER_TYPE_OSN:
+ skb_len = (*hdr)->hdr.osn.pdu_length;
+ headroom = sizeof(struct qeth_hdr);
+ break;
+ default:
+ break;
}
if (!skb_len)
@@ -4177,6 +4169,41 @@ void qeth_core_free_discipline(struct qeth_card *card)
card->discipline.ccwgdriver = NULL;
}
+static void qeth_determine_capabilities(struct qeth_card *card)
+{
+ int rc;
+ int length;
+ char *prcd;
+
+ QETH_DBF_TEXT(SETUP, 2, "detcapab");
+ rc = ccw_device_set_online(CARD_DDEV(card));
+ if (rc) {
+ QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+ goto out;
+ }
+
+
+ rc = qeth_read_conf_data(card, (void **) &prcd, &length);
+ if (rc) {
+ QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
+ dev_name(&card->gdev->dev), rc);
+ QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ goto out_offline;
+ }
+ qeth_configure_unitaddr(card, prcd);
+ qeth_configure_blkt_default(card, prcd);
+ kfree(prcd);
+
+ rc = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
+ if (rc)
+ QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+
+out_offline:
+ ccw_device_set_offline(CARD_DDEV(card));
+out:
+ return;
+}
+
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card;
@@ -4242,6 +4269,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
list_add_tail(&card->list, &qeth_core_card_list.list);
write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
+
+ qeth_determine_capabilities(card);
return 0;
err_card:
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 1ba51152f667..104a3351e02b 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -156,6 +156,8 @@ enum qeth_ipa_return_codes {
IPA_RC_IP_TABLE_FULL = 0x0002,
IPA_RC_UNKNOWN_ERROR = 0x0003,
IPA_RC_UNSUPPORTED_COMMAND = 0x0004,
+ IPA_RC_TRACE_ALREADY_ACTIVE = 0x0005,
+ IPA_RC_INVALID_FORMAT = 0x0006,
IPA_RC_DUP_IPV6_REMOTE = 0x0008,
IPA_RC_DUP_IPV6_HOME = 0x0010,
IPA_RC_UNREGISTERED_ADDR = 0x0011,
@@ -196,6 +198,11 @@ enum qeth_ipa_return_codes {
IPA_RC_INVALID_IP_VERSION2 = 0xf001,
IPA_RC_FFFF = 0xffff
};
+/* for DELIP */
+#define IPA_RC_IP_ADDRESS_NOT_DEFINED IPA_RC_PRIMARY_ALREADY_DEFINED
+/* for SET_DIAGNOSTIC_ASSIST */
+#define IPA_RC_INVALID_SUBCMD IPA_RC_IP_TABLE_FULL
+#define IPA_RC_HARDWARE_AUTH_ERROR IPA_RC_UNKNOWN_ERROR
/* IPA function flags; each flag marks availability of respective function */
enum qeth_ipa_funcs {
@@ -246,6 +253,7 @@ enum qeth_ipa_setadp_cmd {
IPA_SETADP_SET_SNMP_CONTROL = 0x00000200L,
IPA_SETADP_QUERY_CARD_INFO = 0x00000400L,
IPA_SETADP_SET_PROMISC_MODE = 0x00000800L,
+ IPA_SETADP_SET_DIAG_ASSIST = 0x00002000L,
IPA_SETADP_SET_ACCESS_CONTROL = 0x00010000L,
};
enum qeth_ipa_mac_ops {
@@ -424,6 +432,40 @@ struct qeth_create_destroy_address {
__u8 unique_id[8];
} __attribute__ ((packed));
+/* SET DIAGNOSTIC ASSIST IPA Command: *************************************/
+
+enum qeth_diags_cmds {
+ QETH_DIAGS_CMD_QUERY = 0x0001,
+ QETH_DIAGS_CMD_TRAP = 0x0002,
+ QETH_DIAGS_CMD_TRACE = 0x0004,
+ QETH_DIAGS_CMD_NOLOG = 0x0008,
+ QETH_DIAGS_CMD_DUMP = 0x0010,
+};
+
+enum qeth_diags_trace_types {
+ QETH_DIAGS_TYPE_HIPERSOCKET = 0x02,
+};
+
+enum qeth_diags_trace_cmds {
+ QETH_DIAGS_CMD_TRACE_ENABLE = 0x0001,
+ QETH_DIAGS_CMD_TRACE_DISABLE = 0x0002,
+ QETH_DIAGS_CMD_TRACE_MODIFY = 0x0004,
+ QETH_DIAGS_CMD_TRACE_REPLACE = 0x0008,
+ QETH_DIAGS_CMD_TRACE_QUERY = 0x0010,
+};
+
+struct qeth_ipacmd_diagass {
+ __u32 host_tod2;
+ __u32:32;
+ __u16 subcmd_len;
+ __u16:16;
+ __u32 subcmd;
+ __u8 type;
+ __u8 action;
+ __u16 options;
+ __u32:32;
+} __attribute__ ((packed));
+
/* Header for each IPA command */
struct qeth_ipacmd_hdr {
__u8 command;
@@ -452,6 +494,7 @@ struct qeth_ipa_cmd {
struct qeth_create_destroy_address create_destroy_addr;
struct qeth_ipacmd_setadpparms setadapterparms;
struct qeth_set_routing setrtg;
+ struct qeth_ipacmd_diagass diagass;
} data;
} __attribute__ ((packed));
@@ -469,7 +512,6 @@ enum qeth_ipa_arp_return_codes {
QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008,
};
-
extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 9ff2b36fdc43..88ae4357136a 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -118,7 +118,7 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
- unsigned int portno;
+ unsigned int portno, limit;
if (!card)
return -EINVAL;
@@ -128,9 +128,11 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
return -EPERM;
portno = simple_strtoul(buf, &tmp, 16);
- if (portno > QETH_MAX_PORTNO) {
+ if (portno > QETH_MAX_PORTNO)
+ return -EINVAL;
+ limit = (card->ssqd.pcnt ? card->ssqd.pcnt - 1 : card->ssqd.pcnt);
+ if (portno > limit)
return -EINVAL;
- }
card->info.portno = portno;
return count;
@@ -537,7 +539,7 @@ static ssize_t qeth_dev_blkt_total_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_store(card, buf, count,
- &card->info.blkt.time_total, 1000);
+ &card->info.blkt.time_total, 5000);
}
@@ -559,7 +561,7 @@ static ssize_t qeth_dev_blkt_inter_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_store(card, buf, count,
- &card->info.blkt.inter_packet, 100);
+ &card->info.blkt.inter_packet, 1000);
}
static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show,
@@ -580,7 +582,7 @@ static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_store(card, buf, count,
- &card->info.blkt.inter_packet_jumbo, 100);
+ &card->info.blkt.inter_packet_jumbo, 1000);
}
static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 0b763396d5d1..51fde6f2e0b8 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -486,22 +486,14 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
case IPA_RC_L2_DUP_MAC:
case IPA_RC_L2_DUP_LAYER3_MAC:
dev_warn(&card->gdev->dev,
- "MAC address "
- "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
- "already exists\n",
- card->dev->dev_addr[0], card->dev->dev_addr[1],
- card->dev->dev_addr[2], card->dev->dev_addr[3],
- card->dev->dev_addr[4], card->dev->dev_addr[5]);
+ "MAC address %pM already exists\n",
+ card->dev->dev_addr);
break;
case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
dev_warn(&card->gdev->dev,
- "MAC address "
- "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
- "is not authorized\n",
- card->dev->dev_addr[0], card->dev->dev_addr[1],
- card->dev->dev_addr[2], card->dev->dev_addr[3],
- card->dev->dev_addr[4], card->dev->dev_addr[5]);
+ "MAC address %pM is not authorized\n",
+ card->dev->dev_addr);
break;
default:
break;
@@ -512,12 +504,8 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
OSA_ADDR_LEN);
dev_info(&card->gdev->dev,
- "MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
- "successfully registered on device %s\n",
- card->dev->dev_addr[0], card->dev->dev_addr[1],
- card->dev->dev_addr[2], card->dev->dev_addr[3],
- card->dev->dev_addr[4], card->dev->dev_addr[5],
- card->dev->name);
+ "MAC address %pM successfully registered on device %s\n",
+ card->dev->dev_addr, card->dev->name);
}
return 0;
}
@@ -634,7 +622,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
for (dm = dev->mc_list; dm; dm = dm->next)
qeth_l2_add_mc(card, dm->da_addr, 0);
- list_for_each_entry(ha, &dev->uc.list, list)
+ netdev_for_each_uc_addr(ha, dev)
qeth_l2_add_mc(card, ha->addr, 1);
spin_unlock_bh(&card->mclock);
@@ -781,7 +769,8 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
index = i % QDIO_MAX_BUFFERS_PER_Q;
buffer = &card->qdio.in_q->bufs[index];
if (!(qdio_err &&
- qeth_check_qdio_errors(buffer->buffer, qdio_err, "qinerr")))
+ qeth_check_qdio_errors(card, buffer->buffer, qdio_err,
+ "qinerr")))
qeth_l2_process_inbound_buffer(card, buffer, index);
/* clear buffer and give back to hardware */
qeth_put_buffer_pool_entry(card, buffer->pool_entry);
@@ -938,7 +927,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
QETH_DBF_TEXT(SETUP, 2, "setonlin");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
- qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
recover_flag = card->state;
rc = qeth_core_hardsetup_card(card);
if (rc) {
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 321988fa9f7d..8447d233d0b3 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -13,6 +13,8 @@
#include "qeth_core.h"
+#define QETH_SNIFF_AVAIL 0x0008
+
struct qeth_ipaddr {
struct list_head entry;
enum qeth_ip_types type;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index fd1b6ed3721f..5475834ab916 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -242,6 +242,8 @@ static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
struct qeth_ipaddr *tmp, *t;
int found = 0;
+ if (card->options.sniffer)
+ return 0;
list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
(tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
@@ -457,6 +459,8 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
QETH_DBF_TEXT(TRACE, 2, "sdiplist");
QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
+ if (card->options.sniffer)
+ return;
spin_lock_irqsave(&card->ip_lock, flags);
tbd_list = card->ip_tbd_list;
card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
@@ -495,7 +499,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
spin_unlock_irqrestore(&card->ip_lock, flags);
rc = qeth_l3_deregister_addr_entry(card, addr);
spin_lock_irqsave(&card->ip_lock, flags);
- if (!rc || (rc == IPA_RC_PRIMARY_ALREADY_DEFINED))
+ if (!rc || (rc == IPA_RC_IP_ADDRESS_NOT_DEFINED))
kfree(addr);
else
list_add_tail(&addr->entry, &card->ip_list);
@@ -513,6 +517,8 @@ static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
unsigned long flags;
QETH_DBF_TEXT(TRACE, 4, "clearip");
+ if (recover && card->options.sniffer)
+ return;
spin_lock_irqsave(&card->ip_lock, flags);
/* clear todo list */
list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) {
@@ -1674,6 +1680,76 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)
return rc;
}
+static int
+qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd;
+ __u16 rc;
+
+ QETH_DBF_TEXT(SETUP, 2, "diastrcb");
+
+ cmd = (struct qeth_ipa_cmd *)data;
+ rc = cmd->hdr.return_code;
+ if (rc) {
+ QETH_DBF_TEXT_(TRACE, 2, "dxter%x", rc);
+ if (cmd->data.diagass.action == QETH_DIAGS_CMD_TRACE_ENABLE) {
+ switch (rc) {
+ case IPA_RC_HARDWARE_AUTH_ERROR:
+ dev_warn(&card->gdev->dev, "The device is not "
+ "authorized to run as a HiperSockets "
+ "network traffic analyzer\n");
+ break;
+ case IPA_RC_TRACE_ALREADY_ACTIVE:
+ dev_warn(&card->gdev->dev, "A HiperSockets "
+ "network traffic analyzer is already "
+ "active in the HiperSockets LAN\n");
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+ }
+
+ switch (cmd->data.diagass.action) {
+ case QETH_DIAGS_CMD_TRACE_QUERY:
+ break;
+ case QETH_DIAGS_CMD_TRACE_DISABLE:
+ card->info.promisc_mode = SET_PROMISC_MODE_OFF;
+ dev_info(&card->gdev->dev, "The HiperSockets network traffic "
+ "analyzer is deactivated\n");
+ break;
+ case QETH_DIAGS_CMD_TRACE_ENABLE:
+ card->info.promisc_mode = SET_PROMISC_MODE_ON;
+ dev_info(&card->gdev->dev, "The HiperSockets network traffic "
+ "analyzer is activated\n");
+ break;
+ default:
+ QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n",
+ cmd->data.diagass.action, QETH_CARD_IFNAME(card));
+ }
+
+ return 0;
+}
+
+static int
+qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
+{
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_cmd *cmd;
+
+ QETH_DBF_TEXT(SETUP, 2, "diagtrac");
+
+ iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.diagass.subcmd_len = 16;
+ cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
+ cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET;
+ cmd->data.diagass.action = diags_cmd;
+ return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
+}
+
static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac,
struct net_device *dev)
{
@@ -1951,7 +2027,10 @@ static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
case QETH_CAST_ANYCAST:
case QETH_CAST_NOCAST:
default:
- skb->pkt_type = PACKET_HOST;
+ if (card->options.sniffer)
+ skb->pkt_type = PACKET_OTHERHOST;
+ else
+ skb->pkt_type = PACKET_HOST;
memcpy(tg_addr, card->dev->dev_addr,
card->dev->addr_len);
}
@@ -2007,7 +2086,6 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
int offset;
__u16 vlan_tag = 0;
unsigned int len;
-
/* get first element of current buffer */
element = (struct qdio_buffer_element *)&buf->buffer->element[0];
offset = 0;
@@ -2026,7 +2104,7 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
case QETH_HEADER_TYPE_LAYER3:
vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr);
len = skb->len;
- if (vlan_tag)
+ if (vlan_tag && !card->options.sniffer)
if (card->vlangrp)
vlan_hwaccel_rx(skb, card->vlangrp,
vlan_tag);
@@ -2037,6 +2115,16 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
else
netif_rx(skb);
break;
+ case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
+ skb->pkt_type = PACKET_HOST;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ if (card->options.checksum_type == NO_CHECKSUMMING)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ len = skb->len;
+ netif_receive_skb(skb);
+ break;
default:
dev_kfree_skb_any(skb);
QETH_DBF_TEXT(TRACE, 3, "inbunkno");
@@ -2118,6 +2206,9 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
qeth_set_allowed_threads(card, 0, 1);
+ if (card->options.sniffer &&
+ (card->info.promisc_mode == SET_PROMISC_MODE_ON))
+ qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
if (card->read.state == CH_STATE_UP &&
card->write.state == CH_STATE_UP &&
(card->state == CARD_STATE_UP)) {
@@ -2162,6 +2253,36 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
return rc;
}
+/*
+ * Test for and switch promiscuous mode (on or off),
+ * either for guestlan or the HiperSockets sniffer.
+ */
+static void
+qeth_l3_handle_promisc_mode(struct qeth_card *card)
+{
+ struct net_device *dev = card->dev;
+
+ if (((dev->flags & IFF_PROMISC) &&
+ (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
+ (!(dev->flags & IFF_PROMISC) &&
+ (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
+ return;
+
+ if (card->info.guestlan) { /* Guestlan trace */
+ if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+ qeth_setadp_promisc_mode(card);
+ } else if (card->options.sniffer && /* HiperSockets trace */
+ qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
+ if (dev->flags & IFF_PROMISC) {
+ QETH_DBF_TEXT(TRACE, 3, "+promisc");
+ qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE);
+ } else {
+ QETH_DBF_TEXT(TRACE, 3, "-promisc");
+ qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
+ }
+ }
+}
+
static void qeth_l3_set_multicast_list(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
@@ -2170,15 +2291,17 @@ static void qeth_l3_set_multicast_list(struct net_device *dev)
if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
(card->state != CARD_STATE_UP))
return;
- qeth_l3_delete_mc_addresses(card);
- qeth_l3_add_multicast_ipv4(card);
+ if (!card->options.sniffer) {
+ qeth_l3_delete_mc_addresses(card);
+ qeth_l3_add_multicast_ipv4(card);
#ifdef CONFIG_QETH_IPV6
- qeth_l3_add_multicast_ipv6(card);
+ qeth_l3_add_multicast_ipv6(card);
#endif
- qeth_l3_set_ip_addr_list(card);
- if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
- return;
- qeth_setadp_promisc_mode(card);
+ qeth_l3_set_ip_addr_list(card);
+ if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+ return;
+ }
+ qeth_l3_handle_promisc_mode(card);
}
static const char *qeth_l3_arp_get_error_cause(int *rc)
@@ -2778,8 +2901,9 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
int nr_frags;
if ((card->info.type == QETH_CARD_TYPE_IQD) &&
- (skb->protocol != htons(ETH_P_IPV6)) &&
- (skb->protocol != htons(ETH_P_IP)))
+ (((skb->protocol != htons(ETH_P_IPV6)) &&
+ (skb->protocol != htons(ETH_P_IP))) ||
+ card->options.sniffer))
goto tx_drop;
if ((card->state != CARD_STATE_UP) || !card->lan_online) {
@@ -3155,7 +3279,7 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
index = i % QDIO_MAX_BUFFERS_PER_Q;
buffer = &card->qdio.in_q->bufs[index];
if (!(qdio_err &&
- qeth_check_qdio_errors(buffer->buffer,
+ qeth_check_qdio_errors(card, buffer->buffer,
qdio_err, "qinerr")))
qeth_l3_process_inbound_buffer(card, buffer, index);
/* clear buffer and give back to hardware */
@@ -3214,8 +3338,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
QETH_DBF_TEXT(SETUP, 2, "setonlin");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
- qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
-
recover_flag = card->state;
rc = qeth_core_hardsetup_card(card);
if (rc) {
@@ -3250,20 +3372,22 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
goto out_remove;
} else
card->lan_online = 1;
- qeth_l3_set_large_send(card, card->options.large_send);
rc = qeth_l3_setadapter_parms(card);
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
- rc = qeth_l3_start_ipassists(card);
- if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
- rc = qeth_l3_setrouting_v4(card);
- if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
- rc = qeth_l3_setrouting_v6(card);
- if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ if (!card->options.sniffer) {
+ rc = qeth_l3_start_ipassists(card);
+ if (rc)
+ QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+ qeth_l3_set_large_send(card, card->options.large_send);
+ rc = qeth_l3_setrouting_v4(card);
+ if (rc)
+ QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
+ rc = qeth_l3_setrouting_v6(card);
+ if (rc)
+ QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ }
netif_tx_disable(card->dev);
rc = qeth_init_qdio_queues(card);
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 3360b0941aa1..3f08b11274ae 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -319,6 +319,61 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show,
qeth_l3_dev_checksum_store);
+static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+
+ if (!card)
+ return -EINVAL;
+
+ return sprintf(buf, "%i\n", card->options.sniffer ? 1 : 0);
+}
+
+static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+ int ret;
+ unsigned long i;
+
+ if (!card)
+ return -EINVAL;
+
+ if (card->info.type != QETH_CARD_TYPE_IQD)
+ return -EPERM;
+
+ if ((card->state != CARD_STATE_DOWN) &&
+ (card->state != CARD_STATE_RECOVER))
+ return -EPERM;
+
+ ret = strict_strtoul(buf, 16, &i);
+ if (ret)
+ return -EINVAL;
+ switch (i) {
+ case 0:
+ card->options.sniffer = i;
+ break;
+ case 1:
+ ret = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
+ if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) {
+ card->options.sniffer = i;
+ if (card->qdio.init_pool.buf_count !=
+ QETH_IN_BUF_COUNT_MAX)
+ qeth_realloc_buffer_pool(card,
+ QETH_IN_BUF_COUNT_MAX);
+ break;
+ } else
+ return -EPERM;
+ default: /* fall through */
+ return -EINVAL;
+ }
+ return count;
+}
+
+static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
+ qeth_l3_dev_sniffer_store);
+
static ssize_t qeth_l3_dev_large_send_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -373,6 +428,7 @@ static struct attribute *qeth_l3_device_attrs[] = {
&dev_attr_broadcast_mode.attr,
&dev_attr_canonical_macaddr.attr,
&dev_attr_checksumming.attr,
+ &dev_attr_sniffer.attr,
&dev_attr_large_send.attr,
NULL,
};
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index 3c6feed46f6e..97efce184a8f 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -270,7 +270,6 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
set_irq(dev, irq++);
}
break;
- /* fallthrough */
case SSB_DEV_PCI:
case SSB_DEV_ETHERNET:
case SSB_DEV_ETHERNET_GBIT:
@@ -281,6 +280,10 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
set_irq(dev, irq++);
break;
}
+ /* fallthrough */
+ case SSB_DEV_EXTIF:
+ set_irq(dev, 0);
+ break;
}
}
ssb_dprintk(KERN_INFO PFX "after irq reconfiguration\n");
diff --git a/drivers/staging/et131x/et131x_netdev.c b/drivers/staging/et131x/et131x_netdev.c
index 24d97b4fa6fb..bc1fad248952 100644
--- a/drivers/staging/et131x/et131x_netdev.c
+++ b/drivers/staging/et131x/et131x_netdev.c
@@ -444,11 +444,11 @@ void et131x_multicast(struct net_device *netdev)
adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
}
- if (netdev->mc_count > NIC_MAX_MCAST_LIST) {
+ if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST) {
adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
}
- if (netdev->mc_count < 1) {
+ if (netdev_mc_count(netdev) < 1) {
adapter->PacketFilter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
adapter->PacketFilter &= ~ET131X_PACKET_TYPE_MULTICAST;
} else {
@@ -456,10 +456,10 @@ void et131x_multicast(struct net_device *netdev)
}
/* Set values in the private adapter struct */
- adapter->MCAddressCount = netdev->mc_count;
+ adapter->MCAddressCount = netdev_mc_count(netdev);
- if (netdev->mc_count) {
- count = netdev->mc_count - 1;
+ if (!netdev_mc_empty(netdev)) {
+ count = netdev_mc_count(netdev) - 1;
memcpy(adapter->MCList[count], mclist->dmi_addr, ETH_ALEN);
}
diff --git a/drivers/staging/netwave/netwave_cs.c b/drivers/staging/netwave/netwave_cs.c
index e61e6b9440ab..e936717d1f4b 100644
--- a/drivers/staging/netwave/netwave_cs.c
+++ b/drivers/staging/netwave/netwave_cs.c
@@ -1341,15 +1341,15 @@ static void set_multicast_list(struct net_device *dev)
#ifdef PCMCIA_DEBUG
{
xstatic int old;
- if (old != dev->mc_count) {
- old = dev->mc_count;
+ if (old != netdev_mc_count(dev)) {
+ old = netdev_mc_count(dev);
pr_debug("%s: setting Rx mode to %d addresses.\n",
- dev->name, dev->mc_count);
+ dev->name, netdev_mc_count(dev));
}
}
#endif
- if (dev->mc_count || (dev->flags & IFF_ALLMULTI)) {
+ if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) {
/* Multicast Mode */
rcvMode = rxConfRxEna + rxConfAMP + rxConfBcast;
} else if (dev->flags & IFF_PROMISC) {
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
index 0d490c164db6..9086047c32d4 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
@@ -482,15 +482,6 @@ struct ieee80211_header_data {
u16 seq_ctrl;
};
-struct ieee80211_hdr_3addr {
- u16 frame_ctl;
- u16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- u16 seq_ctl;
-} __attribute__ ((packed));
-
struct ieee80211_hdr_4addr {
u16 frame_ctl;
u16 duration_id;
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
index c7c645af0ebb..a2150670ef56 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
@@ -203,7 +203,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
enqueue_mgmt(ieee,skb);
}else{
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4);
+ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0]<<4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -220,7 +220,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
spin_unlock_irqrestore(&ieee->lock, flags);
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags);
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -246,7 +246,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *i
if(single){
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -259,7 +259,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *i
}else{
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -287,7 +287,7 @@ inline struct sk_buff *ieee80211_disassociate_skb(
return NULL;
disass = (struct ieee80211_disassoc_frame *) skb_put(skb,sizeof(struct ieee80211_disassoc_frame));
- disass->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_DISASSOC);
+ disass->header.frame_control = cpu_to_le16(IEEE80211_STYPE_DISASSOC);
disass->header.duration_id = 0;
memcpy(disass->header.addr1, beacon->bssid, ETH_ALEN);
@@ -905,7 +905,7 @@ struct sk_buff* ieee80211_assoc_resp(struct ieee80211_device *ieee, u8 *dest)
assoc = (struct ieee80211_assoc_response_frame *)
skb_put(skb,sizeof(struct ieee80211_assoc_response_frame));
- assoc->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP);
+ assoc->header.frame_control = cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP);
memcpy(assoc->header.addr1, dest,ETH_ALEN);
memcpy(assoc->header.addr3, ieee->dev->dev_addr, ETH_ALEN);
memcpy(assoc->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
@@ -981,7 +981,7 @@ struct sk_buff* ieee80211_null_func(struct ieee80211_device *ieee,short pwr)
memcpy(hdr->addr2, ieee->dev->dev_addr, ETH_ALEN);
memcpy(hdr->addr3, ieee->current_network.bssid, ETH_ALEN);
- hdr->frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS |
(pwr ? IEEE80211_FCTL_PM:0));
@@ -1084,7 +1084,7 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
skb_put(skb, sizeof(struct ieee80211_assoc_request_frame));
- hdr->header.frame_ctl = IEEE80211_STYPE_ASSOC_REQ;
+ hdr->header.frame_control = IEEE80211_STYPE_ASSOC_REQ;
hdr->header.duration_id= 37; //FIXME
memcpy(hdr->header.addr1, beacon->bssid, ETH_ALEN);
memcpy(hdr->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
@@ -1786,11 +1786,11 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
tasklet_schedule(&ieee->ps_task);
- if(WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_PROBE_RESP &&
- WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_BEACON)
+ if (WLAN_FC_GET_STYPE(header->frame_control) != IEEE80211_STYPE_PROBE_RESP &&
+ WLAN_FC_GET_STYPE(header->frame_control) != IEEE80211_STYPE_BEACON)
ieee->last_rx_ps_time = jiffies;
- switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {
+ switch (WLAN_FC_GET_STYPE(header->frame_control)) {
case IEEE80211_STYPE_ASSOC_RESP:
case IEEE80211_STYPE_REASSOC_RESP:
@@ -2064,7 +2064,7 @@ void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee)
header = (struct ieee80211_hdr_3addr *) skb->data;
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index e0f13efdb15a..1847f38b9f22 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -1890,7 +1890,7 @@ rate)
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
int mode;
struct ieee80211_hdr_3addr *h = (struct ieee80211_hdr_3addr *) skb->data;
- short morefrag = (h->frame_ctl) & IEEE80211_FCTL_MOREFRAGS;
+ short morefrag = (h->frame_control) & IEEE80211_FCTL_MOREFRAGS;
unsigned long flags;
int priority;
@@ -2158,7 +2158,7 @@ short rtl8180_tx(struct net_device *dev, u8* txbuf, int len, int priority,
TxDescDuration = ThisFrameTime + aSifsTime + AckTime;
}
- if(!(frag_hdr->frame_ctl & IEEE80211_FCTL_MOREFRAGS)) { //no more fragment
+ if (!(frag_hdr->frame_control & IEEE80211_FCTL_MOREFRAGS)) {
// ThisFrame-ACK.
Duration = aSifsTime + AckTime;
} else { // One or more fragments remained.
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 5b191afc1442..8c9d5e5c7702 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -1365,7 +1365,7 @@ static void slic_mcast_set_list(struct net_device *dev)
int i;
char *addresses;
struct dev_mc_list *mc_list = dev->mc_list;
- int mc_count = dev->mc_count;
+ int mc_count = netdev_mc_count(dev);
ASSERT(adapter);
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 0db8d7b6e79c..82b3a6e0b15a 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -3093,7 +3093,7 @@ static void device_set_multi(struct net_device *dev) {
/* Unconditionally log net taps. */
pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST|RCR_UNICAST);
}
- else if ((dev->mc_count > pDevice->multicast_limit)
+ else if ((netdev_mc_count(dev) > pDevice->multicast_limit)
|| (dev->flags & IFF_ALLMULTI)) {
MACvSelectPage1(pDevice->PortOffset);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, 0xffffffff);
@@ -3103,7 +3103,7 @@ static void device_set_multi(struct net_device *dev) {
}
else {
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index ef17c4958c67..2c6a5350547c 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1619,7 +1619,8 @@ static void device_set_multi(struct net_device *dev) {
// Unconditionally log net taps.
pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST|RCR_UNICAST);
}
- else if ((dev->mc_count > pDevice->multicast_limit) || (dev->flags & IFF_ALLMULTI)) {
+ else if ((netdev_mc_count(dev) > pDevice->multicast_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
CONTROLnsRequestOut(pDevice,
MESSAGE_TYPE_WRITE,
MAC_REG_MAR0,
@@ -1631,7 +1632,7 @@ static void device_set_multi(struct net_device *dev) {
}
else {
memset(mc_filter, 0, sizeof(mc_filter));
- for (ii = 0, mclist = dev->mc_list; mclist && ii < dev->mc_count;
+ for (ii = 0, mclist = dev->mc_list; mclist && ii < netdev_mc_count(dev);
ii++, mclist = mclist->next) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
diff --git a/drivers/staging/wavelan/wavelan.c b/drivers/staging/wavelan/wavelan.c
index d634b2da3b84..961f1417fb58 100644
--- a/drivers/staging/wavelan/wavelan.c
+++ b/drivers/staging/wavelan/wavelan.c
@@ -1367,7 +1367,7 @@ static void wavelan_set_multicast_list(struct net_device * dev)
#ifdef DEBUG_IOCTL_INFO
printk(KERN_DEBUG
"%s: wavelan_set_multicast_list(): setting Rx mode %02X to %d addresses.\n",
- dev->name, dev->flags, dev->mc_count);
+ dev->name, dev->flags, netdev_mc_count(dev));
#endif
/* Are we asking for promiscuous mode,
@@ -1375,7 +1375,7 @@ static void wavelan_set_multicast_list(struct net_device * dev)
* or too many multicast addresses for the hardware filter? */
if ((dev->flags & IFF_PROMISC) ||
(dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > I82586_MAX_MULTICAST_ADDRESSES)) {
+ (netdev_mc_count(dev) > I82586_MAX_MULTICAST_ADDRESSES)) {
/*
* Enable promiscuous mode: receive all packets.
*/
@@ -1393,11 +1393,11 @@ static void wavelan_set_multicast_list(struct net_device * dev)
* in multicast list
*/
#ifdef MULTICAST_AVOID
- if (lp->promiscuous || (dev->mc_count != lp->mc_count))
+ if (lp->promiscuous || (netdev_mc_count(dev) != lp->mc_count))
#endif
{
lp->promiscuous = 0;
- lp->mc_count = dev->mc_count;
+ lp->mc_count = netdev_mc_count(dev);
wv_82586_reconfig(dev);
}
diff --git a/drivers/staging/wavelan/wavelan_cs.c b/drivers/staging/wavelan/wavelan_cs.c
index 10c702b5be4a..08fcb226d7d6 100644
--- a/drivers/staging/wavelan/wavelan_cs.c
+++ b/drivers/staging/wavelan/wavelan_cs.c
@@ -1373,7 +1373,7 @@ wavelan_set_multicast_list(struct net_device * dev)
#ifdef DEBUG_IOCTL_INFO
printk(KERN_DEBUG "%s: wavelan_set_multicast_list(): setting Rx mode %02X to %d addresses.\n",
- dev->name, dev->flags, dev->mc_count);
+ dev->name, dev->flags, netdev_mc_count(dev));
#endif
if(dev->flags & IFF_PROMISC)
@@ -1394,7 +1394,7 @@ wavelan_set_multicast_list(struct net_device * dev)
/* If all multicast addresses
* or too much multicast addresses for the hardware filter */
if((dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > I82593_MAX_MULTICAST_ADDRESSES))
+ (netdev_mc_count(dev) > I82593_MAX_MULTICAST_ADDRESSES))
{
/*
* Disable promiscuous mode, but active the all multicast mode
@@ -1418,12 +1418,12 @@ wavelan_set_multicast_list(struct net_device * dev)
*/
#ifdef MULTICAST_AVOID
if(lp->promiscuous || lp->allmulticast ||
- (dev->mc_count != lp->mc_count))
+ (netdev_mc_count(dev) != lp->mc_count))
#endif
{
lp->promiscuous = 0;
lp->allmulticast = 0;
- lp->mc_count = dev->mc_count;
+ lp->mc_count = netdev_mc_count(dev);
wv_82593_reconfig(dev);
}
@@ -3622,7 +3622,8 @@ wv_82593_config(struct net_device * dev)
if(!wv_82593_cmd(dev, "wv_82593_config(): mc-setup",
OP0_MC_SETUP, SR0_MC_SETUP_DONE))
ret = FALSE;
- lp->mc_count = dev->mc_count; /* remember to avoid repeated reset */
+ /* remember to avoid repeated reset */
+ lp->mc_count = netdev_mc_count(dev);
}
/* Job done, clear the flag */
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index ac3890247965..a95ebf881fcd 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -1070,9 +1070,9 @@ void wl_multicast( struct net_device *dev )
( dev->flags & IFF_MULTICAST ) ? "Multicast " : "",
( dev->flags & IFF_ALLMULTI ) ? "All-Multicast" : "" );
- DBG_PRINT( " mc_count: %d\n", dev->mc_count );
+ DBG_PRINT( " mc_count: %d\n", netdev_mc_count(dev));
- for( x = 0, mclist = dev->mc_list; mclist && x < dev->mc_count;
+ for( x = 0, mclist = dev->mc_list; mclist && x < netdev_mc_count(dev);
x++, mclist = mclist->next ) {
DBG_PRINT( " %s (%d)\n", DbgHwAddr(mclist->dmi_addr),
mclist->dmi_addrlen );
@@ -1103,7 +1103,7 @@ void wl_multicast( struct net_device *dev )
DBG_PRINT( "Enabling Promiscuous mode (IFF_PROMISC)\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
}
- else if(( dev->mc_count > HCF_MAX_MULTICAST ) ||
+ else if ((netdev_mc_count(dev) > HCF_MAX_MULTICAST) ||
( dev->flags & IFF_ALLMULTI )) {
/* Shutting off this filter will enable all multicast frames to
be sent up from the device; however, this is a static RID, so
@@ -1115,13 +1115,13 @@ void wl_multicast( struct net_device *dev )
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
wl_apply( lp );
}
- else if( dev->mc_count != 0 ) {
+ else if (!netdev_mc_empty(dev)) {
/* Set the multicast addresses */
- lp->ltvRecord.len = ( dev->mc_count * 3 ) + 1;
+ lp->ltvRecord.len = ( netdev_mc_count(dev) * 3 ) + 1;
lp->ltvRecord.typ = CFG_GROUP_ADDR;
for( x = 0, mclist = dev->mc_list;
- ( x < dev->mc_count ) && ( mclist != NULL );
+ ( x < netdev_mc_count(dev)) && ( mclist != NULL );
x++, mclist = mclist->next ) {
memcpy( &( lp->ltvRecord.u.u8[x * ETH_ALEN] ),
mclist->dmi_addr, ETH_ALEN );
@@ -1194,9 +1194,7 @@ static const struct net_device_ops wl_netdev_ops =
.ndo_stop = &wl_adapter_close,
.ndo_do_ioctl = &wl_ioctl,
-#ifdef HAVE_TX_TIMEOUT
.ndo_tx_timeout = &wl_tx_timeout,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = wl_poll,
@@ -1270,9 +1268,7 @@ struct net_device * wl_device_alloc( void )
dev->stop = &wl_adapter_close;
dev->do_ioctl = &wl_ioctl;
-#ifdef HAVE_TX_TIMEOUT
dev->tx_timeout = &wl_tx_timeout;
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = wl_poll;
@@ -1280,9 +1276,7 @@ struct net_device * wl_device_alloc( void )
#endif // (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30))
-#ifdef HAVE_TX_TIMEOUT
dev->watchdog_timeo = TX_TIMEOUT;
-#endif
dev->ethtool_ops = &wl_ethtool_ops;
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
new file mode 100644
index 000000000000..9e9355367bb3
--- /dev/null
+++ b/drivers/vhost/Kconfig
@@ -0,0 +1,11 @@
+config VHOST_NET
+ tristate "Host kernel accelerator for virtio net (EXPERIMENTAL)"
+ depends on NET && EVENTFD && (TUN || !TUN) && EXPERIMENTAL
+ ---help---
+ This kernel module can be loaded in the host kernel to accelerate
+ guest networking with virtio_net. Not to be confused with the virtio_net
+ module itself, which needs to be loaded in the guest kernel.
+
+ To compile this driver as a module, choose M here: the module will
+ be called vhost_net.
+
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile
new file mode 100644
index 000000000000..72dd02050bb9
--- /dev/null
+++ b/drivers/vhost/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_VHOST_NET) += vhost_net.o
+vhost_net-y := vhost.o net.o
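For reference only (not part of the patch): a heavily abbreviated userspace sketch of attaching a backend to the vhost-net device added by this series. The ioctl names (VHOST_SET_OWNER, VHOST_NET_SET_BACKEND), the /dev/vhost-net node, and struct vhost_vring_file are assumed to come from the include/linux/vhost.h header introduced alongside these files; the mandatory vring and memory-table setup (VHOST_SET_MEM_TABLE, VHOST_SET_VRING_*) and all error handling are omitted.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Illustrative only -- not part of this patch.
 * tap_fd is assumed to be an already configured tun/tap file descriptor. */
static int example_attach_backend(int tap_fd)
{
	struct vhost_vring_file backend;
	int vhost_fd = open("/dev/vhost-net", O_RDWR);

	if (vhost_fd < 0)
		return -1;
	ioctl(vhost_fd, VHOST_SET_OWNER);	/* claim the device for this process */

	backend.fd = tap_fd;
	backend.index = 0;	/* VHOST_NET_VQ_RX in net.c below */
	ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &backend);
	backend.index = 1;	/* VHOST_NET_VQ_TX */
	ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &backend);
	return vhost_fd;
}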
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
new file mode 100644
index 000000000000..4c8928319e1d
--- /dev/null
+++ b/drivers/vhost/net.c
@@ -0,0 +1,661 @@
+/* Copyright (C) 2009 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * virtio-net server in host kernel.
+ */
+
+#include <linux/compat.h>
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/virtio_net.h>
+#include <linux/mmu_context.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/rcupdate.h>
+#include <linux/file.h>
+
+#include <linux/net.h>
+#include <linux/if_packet.h>
+#include <linux/if_arp.h>
+#include <linux/if_tun.h>
+
+#include <net/sock.h>
+
+#include "vhost.h"
+
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_NET_WEIGHT 0x80000
+
+enum {
+ VHOST_NET_VQ_RX = 0,
+ VHOST_NET_VQ_TX = 1,
+ VHOST_NET_VQ_MAX = 2,
+};
+
+enum vhost_net_poll_state {
+ VHOST_NET_POLL_DISABLED = 0,
+ VHOST_NET_POLL_STARTED = 1,
+ VHOST_NET_POLL_STOPPED = 2,
+};
+
+struct vhost_net {
+ struct vhost_dev dev;
+ struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
+ struct vhost_poll poll[VHOST_NET_VQ_MAX];
+ /* Tells us whether we are polling a socket for TX.
+ * We only do this when socket buffer fills up.
+ * Protected by tx vq lock. */
+ enum vhost_net_poll_state tx_poll_state;
+};
+
+/* Pop first len bytes from iovec. Return number of segments used. */
+static int move_iovec_hdr(struct iovec *from, struct iovec *to,
+ size_t len, int iov_count)
+{
+ int seg = 0;
+ size_t size;
+ while (len && seg < iov_count) {
+ size = min(from->iov_len, len);
+ to->iov_base = from->iov_base;
+ to->iov_len = size;
+ from->iov_len -= size;
+ from->iov_base += size;
+ len -= size;
+ ++from;
+ ++to;
+ ++seg;
+ }
+ return seg;
+}
+
+/* Caller must have TX VQ lock */
+static void tx_poll_stop(struct vhost_net *net)
+{
+ if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
+ return;
+ vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
+ net->tx_poll_state = VHOST_NET_POLL_STOPPED;
+}
+
+/* Caller must have TX VQ lock */
+static void tx_poll_start(struct vhost_net *net, struct socket *sock)
+{
+ if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
+ return;
+ vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
+ net->tx_poll_state = VHOST_NET_POLL_STARTED;
+}
+
+/* Expects to be always run from workqueue - which acts as
+ * read-size critical section for our kind of RCU. */
+static void handle_tx(struct vhost_net *net)
+{
+ struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
+ unsigned head, out, in, s;
+ struct msghdr msg = {
+ .msg_name = NULL,
+ .msg_namelen = 0,
+ .msg_control = NULL,
+ .msg_controllen = 0,
+ .msg_iov = vq->iov,
+ .msg_flags = MSG_DONTWAIT,
+ };
+ size_t len, total_len = 0;
+ int err, wmem;
+ size_t hdr_size;
+ struct socket *sock = rcu_dereference(vq->private_data);
+ if (!sock)
+ return;
+
+ wmem = atomic_read(&sock->sk->sk_wmem_alloc);
+ if (wmem >= sock->sk->sk_sndbuf)
+ return;
+
+ use_mm(net->dev.mm);
+ mutex_lock(&vq->mutex);
+ vhost_disable_notify(vq);
+
+ if (wmem < sock->sk->sk_sndbuf * 2)
+ tx_poll_stop(net);
+ hdr_size = vq->hdr_size;
+
+ for (;;) {
+ head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
+ ARRAY_SIZE(vq->iov),
+ &out, &in,
+ NULL, NULL);
+ /* Nothing new? Wait for eventfd to tell us they refilled. */
+ if (head == vq->num) {
+ wmem = atomic_read(&sock->sk->sk_wmem_alloc);
+ if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
+ tx_poll_start(net, sock);
+ set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
+ break;
+ }
+ if (unlikely(vhost_enable_notify(vq))) {
+ vhost_disable_notify(vq);
+ continue;
+ }
+ break;
+ }
+ if (in) {
+ vq_err(vq, "Unexpected descriptor format for TX: "
+ "out %d, int %d\n", out, in);
+ break;
+ }
+ /* Skip header. TODO: support TSO. */
+ s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
+ msg.msg_iovlen = out;
+ len = iov_length(vq->iov, out);
+ /* Sanity check */
+ if (!len) {
+ vq_err(vq, "Unexpected header len for TX: "
+ "%zd expected %zd\n",
+ iov_length(vq->hdr, s), hdr_size);
+ break;
+ }
+ /* TODO: Check specific error and bomb out unless ENOBUFS? */
+ err = sock->ops->sendmsg(NULL, sock, &msg, len);
+ if (unlikely(err < 0)) {
+ vhost_discard_vq_desc(vq);
+ tx_poll_start(net, sock);
+ break;
+ }
+ if (err != len)
+ pr_err("Truncated TX packet: "
+ " len %d != %zd\n", err, len);
+ vhost_add_used_and_signal(&net->dev, vq, head, 0);
+ total_len += len;
+ if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
+ vhost_poll_queue(&vq->poll);
+ break;
+ }
+ }
+
+ mutex_unlock(&vq->mutex);
+ unuse_mm(net->dev.mm);
+}
+
+/* Expects to be always run from workqueue - which acts as
+ * read-size critical section for our kind of RCU. */
+static void handle_rx(struct vhost_net *net)
+{
+ struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
+ unsigned head, out, in, log, s;
+ struct vhost_log *vq_log;
+ struct msghdr msg = {
+ .msg_name = NULL,
+ .msg_namelen = 0,
+ .msg_control = NULL, /* FIXME: get and handle RX aux data. */
+ .msg_controllen = 0,
+ .msg_iov = vq->iov,
+ .msg_flags = MSG_DONTWAIT,
+ };
+
+ struct virtio_net_hdr hdr = {
+ .flags = 0,
+ .gso_type = VIRTIO_NET_HDR_GSO_NONE
+ };
+
+ size_t len, total_len = 0;
+ int err;
+ size_t hdr_size;
+ struct socket *sock = rcu_dereference(vq->private_data);
+ if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
+ return;
+
+ use_mm(net->dev.mm);
+ mutex_lock(&vq->mutex);
+ vhost_disable_notify(vq);
+ hdr_size = vq->hdr_size;
+
+ vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
+ vq->log : NULL;
+
+ for (;;) {
+ head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
+ ARRAY_SIZE(vq->iov),
+ &out, &in,
+ vq_log, &log);
+ /* OK, now we need to know about added descriptors. */
+ if (head == vq->num) {
+ if (unlikely(vhost_enable_notify(vq))) {
+ /* They have slipped one in as we were
+ * doing that: check again. */
+ vhost_disable_notify(vq);
+ continue;
+ }
+ /* Nothing new? Wait for eventfd to tell us
+ * they refilled. */
+ break;
+ }
+ /* We don't need to be notified again. */
+ if (out) {
+ vq_err(vq, "Unexpected descriptor format for RX: "
+ "out %d, int %d\n",
+ out, in);
+ break;
+ }
+ /* Skip header. TODO: support TSO/mergeable rx buffers. */
+ s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, in);
+ msg.msg_iovlen = in;
+ len = iov_length(vq->iov, in);
+ /* Sanity check */
+ if (!len) {
+ vq_err(vq, "Unexpected header len for RX: "
+ "%zd expected %zd\n",
+ iov_length(vq->hdr, s), hdr_size);
+ break;
+ }
+ err = sock->ops->recvmsg(NULL, sock, &msg,
+ len, MSG_DONTWAIT | MSG_TRUNC);
+ /* TODO: Check specific error and bomb out unless EAGAIN? */
+ if (err < 0) {
+ vhost_discard_vq_desc(vq);
+ break;
+ }
+ /* TODO: Should check and handle checksum. */
+ if (err > len) {
+ pr_err("Discarded truncated rx packet: "
+ " len %d > %zd\n", err, len);
+ vhost_discard_vq_desc(vq);
+ continue;
+ }
+ len = err;
+ err = memcpy_toiovec(vq->hdr, (unsigned char *)&hdr, hdr_size);
+ if (err) {
+ vq_err(vq, "Unable to write vnet_hdr at addr %p: %d\n",
+ vq->iov->iov_base, err);
+ break;
+ }
+ len += hdr_size;
+ vhost_add_used_and_signal(&net->dev, vq, head, len);
+ if (unlikely(vq_log))
+ vhost_log_write(vq, vq_log, log, len);
+ total_len += len;
+ if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
+ vhost_poll_queue(&vq->poll);
+ break;
+ }
+ }
+
+ mutex_unlock(&vq->mutex);
+ unuse_mm(net->dev.mm);
+}
+
+static void handle_tx_kick(struct work_struct *work)
+{
+ struct vhost_virtqueue *vq;
+ struct vhost_net *net;
+ vq = container_of(work, struct vhost_virtqueue, poll.work);
+ net = container_of(vq->dev, struct vhost_net, dev);
+ handle_tx(net);
+}
+
+static void handle_rx_kick(struct work_struct *work)
+{
+ struct vhost_virtqueue *vq;
+ struct vhost_net *net;
+ vq = container_of(work, struct vhost_virtqueue, poll.work);
+ net = container_of(vq->dev, struct vhost_net, dev);
+ handle_rx(net);
+}
+
+static void handle_tx_net(struct work_struct *work)
+{
+ struct vhost_net *net;
+ net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_TX].work);
+ handle_tx(net);
+}
+
+static void handle_rx_net(struct work_struct *work)
+{
+ struct vhost_net *net;
+ net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_RX].work);
+ handle_rx(net);
+}
+
+static int vhost_net_open(struct inode *inode, struct file *f)
+{
+ struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
+ int r;
+ if (!n)
+ return -ENOMEM;
+ n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
+ n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
+ r = vhost_dev_init(&n->dev, n->vqs, VHOST_NET_VQ_MAX);
+ if (r < 0) {
+ kfree(n);
+ return r;
+ }
+
+ vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT);
+ vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN);
+ n->tx_poll_state = VHOST_NET_POLL_DISABLED;
+
+ f->private_data = n;
+
+ return 0;
+}
+
+static void vhost_net_disable_vq(struct vhost_net *n,
+ struct vhost_virtqueue *vq)
+{
+ if (!vq->private_data)
+ return;
+ if (vq == n->vqs + VHOST_NET_VQ_TX) {
+ tx_poll_stop(n);
+ n->tx_poll_state = VHOST_NET_POLL_DISABLED;
+ } else
+ vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
+}
+
+static void vhost_net_enable_vq(struct vhost_net *n,
+ struct vhost_virtqueue *vq)
+{
+ struct socket *sock = vq->private_data;
+ if (!sock)
+ return;
+ if (vq == n->vqs + VHOST_NET_VQ_TX) {
+ n->tx_poll_state = VHOST_NET_POLL_STOPPED;
+ tx_poll_start(n, sock);
+ } else
+ vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
+}
+
+static struct socket *vhost_net_stop_vq(struct vhost_net *n,
+ struct vhost_virtqueue *vq)
+{
+ struct socket *sock;
+
+ mutex_lock(&vq->mutex);
+ sock = vq->private_data;
+ vhost_net_disable_vq(n, vq);
+ rcu_assign_pointer(vq->private_data, NULL);
+ mutex_unlock(&vq->mutex);
+ return sock;
+}
+
+static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
+ struct socket **rx_sock)
+{
+ *tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
+ *rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
+}
+
+static void vhost_net_flush_vq(struct vhost_net *n, int index)
+{
+ vhost_poll_flush(n->poll + index);
+ vhost_poll_flush(&n->dev.vqs[index].poll);
+}
+
+static void vhost_net_flush(struct vhost_net *n)
+{
+ vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
+ vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
+}
+
+static int vhost_net_release(struct inode *inode, struct file *f)
+{
+ struct vhost_net *n = f->private_data;
+ struct socket *tx_sock;
+ struct socket *rx_sock;
+
+ vhost_net_stop(n, &tx_sock, &rx_sock);
+ vhost_net_flush(n);
+ vhost_dev_cleanup(&n->dev);
+ if (tx_sock)
+ fput(tx_sock->file);
+ if (rx_sock)
+ fput(rx_sock->file);
+ /* We do an extra flush before freeing memory,
+ * since jobs can re-queue themselves. */
+ vhost_net_flush(n);
+ kfree(n);
+ return 0;
+}
+
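+/* Resolve an fd to a raw packet socket backend (must be SOCK_RAW/AF_PACKET). */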
+static struct socket *get_raw_socket(int fd)
+{
+ struct {
+ struct sockaddr_ll sa;
+ char buf[MAX_ADDR_LEN];
+ } uaddr;
+ int uaddr_len = sizeof uaddr, r;
+ struct socket *sock = sockfd_lookup(fd, &r);
+ if (!sock)
+ return ERR_PTR(-ENOTSOCK);
+
+ /* Parameter checking */
+ if (sock->sk->sk_type != SOCK_RAW) {
+ r = -ESOCKTNOSUPPORT;
+ goto err;
+ }
+
+ r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
+ &uaddr_len, 0);
+ if (r)
+ goto err;
+
+ if (uaddr.sa.sll_family != AF_PACKET) {
+ r = -EPFNOSUPPORT;
+ goto err;
+ }
+ return sock;
+err:
+ fput(sock->file);
+ return ERR_PTR(r);
+}
+
+static struct socket *get_tun_socket(int fd)
+{
+ struct file *file = fget(fd);
+ struct socket *sock;
+ if (!file)
+ return ERR_PTR(-EBADF);
+ sock = tun_get_socket(file);
+ if (IS_ERR(sock))
+ fput(file);
+ return sock;
+}
+
+static struct socket *get_socket(int fd)
+{
+ struct socket *sock;
+ /* special case to disable backend */
+ if (fd == -1)
+ return NULL;
+ sock = get_raw_socket(fd);
+ if (!IS_ERR(sock))
+ return sock;
+ sock = get_tun_socket(fd);
+ if (!IS_ERR(sock))
+ return sock;
+ return ERR_PTR(-ENOTSOCK);
+}
+
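+/* VHOST_NET_SET_BACKEND: attach the socket behind fd (raw packet or tun/tap)
+ * to virtqueue 'index', releasing any previously attached socket. */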
+static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
+{
+ struct socket *sock, *oldsock;
+ struct vhost_virtqueue *vq;
+ int r;
+
+ mutex_lock(&n->dev.mutex);
+ r = vhost_dev_check_owner(&n->dev);
+ if (r)
+ goto err;
+
+ if (index >= VHOST_NET_VQ_MAX) {
+ r = -ENOBUFS;
+ goto err;
+ }
+ vq = n->vqs + index;
+ mutex_lock(&vq->mutex);
+
+ /* Verify that ring has been setup correctly. */
+ if (!vhost_vq_access_ok(vq)) {
+ r = -EFAULT;
+ goto err_vq;
+ }
+ sock = get_socket(fd);
+ if (IS_ERR(sock)) {
+ r = PTR_ERR(sock);
+ goto err_vq;
+ }
+
+ /* start polling new socket */
+ oldsock = vq->private_data;
+ if (sock == oldsock)
+ goto done;
+
+ vhost_net_disable_vq(n, vq);
+ rcu_assign_pointer(vq->private_data, sock);
+ vhost_net_enable_vq(n, vq);
+done:
+ mutex_unlock(&vq->mutex);
+ if (oldsock) {
+ vhost_net_flush_vq(n, index);
+ fput(oldsock->file);
+ }
+ mutex_unlock(&n->dev.mutex);
+ return r;
+
+err_vq:
+ mutex_unlock(&vq->mutex);
+err:
+ mutex_unlock(&n->dev.mutex);
+ return r;
+}
+
+static long vhost_net_reset_owner(struct vhost_net *n)
+{
+ struct socket *tx_sock = NULL;
+ struct socket *rx_sock = NULL;
+ long err;
+ mutex_lock(&n->dev.mutex);
+ err = vhost_dev_check_owner(&n->dev);
+ if (err)
+ goto done;
+ vhost_net_stop(n, &tx_sock, &rx_sock);
+ vhost_net_flush(n);
+ err = vhost_dev_reset_owner(&n->dev);
+done:
+ mutex_unlock(&n->dev.mutex);
+ if (tx_sock)
+ fput(tx_sock->file);
+ if (rx_sock)
+ fput(rx_sock->file);
+ return err;
+}
+
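+/* Record the acked feature bits and propagate the resulting vnet header size
+ * to both virtqueues. */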
+static int vhost_net_set_features(struct vhost_net *n, u64 features)
+{
+ size_t hdr_size = features & (1 << VHOST_NET_F_VIRTIO_NET_HDR) ?
+ sizeof(struct virtio_net_hdr) : 0;
+ int i;
+ mutex_lock(&n->dev.mutex);
+ if ((features & (1 << VHOST_F_LOG_ALL)) &&
+ !vhost_log_access_ok(&n->dev)) {
+ mutex_unlock(&n->dev.mutex);
+ return -EFAULT;
+ }
+ n->dev.acked_features = features;
+ smp_wmb();
+ for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
+ mutex_lock(&n->vqs[i].mutex);
+ n->vqs[i].hdr_size = hdr_size;
+ mutex_unlock(&n->vqs[i].mutex);
+ }
+ vhost_net_flush(n);
+ mutex_unlock(&n->dev.mutex);
+ return 0;
+}
+
+static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
+ unsigned long arg)
+{
+ struct vhost_net *n = f->private_data;
+ void __user *argp = (void __user *)arg;
+ u64 __user *featurep = argp;
+ struct vhost_vring_file backend;
+ u64 features;
+ int r;
+ switch (ioctl) {
+ case VHOST_NET_SET_BACKEND:
+ if (copy_from_user(&backend, argp, sizeof backend))
+ return -EFAULT;
+ return vhost_net_set_backend(n, backend.index, backend.fd);
+ case VHOST_GET_FEATURES:
+ features = VHOST_FEATURES;
+ return copy_to_user(featurep, &features, sizeof features) ? -EFAULT : 0;
+ case VHOST_SET_FEATURES:
+ if (copy_from_user(&features, featurep, sizeof features))
+ return -EFAULT;
+ if (features & ~VHOST_FEATURES)
+ return -EOPNOTSUPP;
+ return vhost_net_set_features(n, features);
+ case VHOST_RESET_OWNER:
+ return vhost_net_reset_owner(n);
+ default:
+ mutex_lock(&n->dev.mutex);
+ r = vhost_dev_ioctl(&n->dev, ioctl, arg);
+ vhost_net_flush(n);
+ mutex_unlock(&n->dev.mutex);
+ return r;
+ }
+}
+
+#ifdef CONFIG_COMPAT
+static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
+ unsigned long arg)
+{
+ return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static const struct file_operations vhost_net_fops = {
+ .owner = THIS_MODULE,
+ .release = vhost_net_release,
+ .unlocked_ioctl = vhost_net_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = vhost_net_compat_ioctl,
+#endif
+ .open = vhost_net_open,
+};
+
+static struct miscdevice vhost_net_misc = {
+ .minor = VHOST_NET_MINOR,
+ .name = "vhost-net",
+ .fops = &vhost_net_fops,
+};
+
+int vhost_net_init(void)
+{
+ int r = vhost_init();
+ if (r)
+ goto err_init;
+ r = misc_register(&vhost_net_misc);
+ if (r)
+ goto err_reg;
+ return 0;
+err_reg:
+ vhost_cleanup();
+err_init:
+ return r;
+}
+module_init(vhost_net_init);
+
+void vhost_net_exit(void)
+{
+ misc_deregister(&vhost_net_misc);
+ vhost_cleanup();
+}
+module_exit(vhost_net_exit);
+
+MODULE_VERSION("0.0.1");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Michael S. Tsirkin");
+MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
new file mode 100644
index 000000000000..6eb15259f5ae
--- /dev/null
+++ b/drivers/vhost/vhost.c
@@ -0,0 +1,1098 @@
+/* Copyright (C) 2009 Red Hat, Inc.
+ * Copyright (C) 2006 Rusty Russell IBM Corporation
+ *
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *
+ * Inspiration, some code, and most witty comments come from
+ * Documentation/lguest/lguest.c, by Rusty Russell
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Generic code for virtio server in host kernel.
+ */
+
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/virtio_net.h>
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/rcupdate.h>
+#include <linux/poll.h>
+#include <linux/file.h>
+#include <linux/highmem.h>
+
+#include <linux/net.h>
+#include <linux/if_packet.h>
+#include <linux/if_arp.h>
+
+#include <net/sock.h>
+
+#include "vhost.h"
+
+enum {
+ VHOST_MEMORY_MAX_NREGIONS = 64,
+ VHOST_MEMORY_F_LOG = 0x1,
+};
+
+static struct workqueue_struct *vhost_workqueue;
+
+static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
+ poll_table *pt)
+{
+ struct vhost_poll *poll;
+ poll = container_of(pt, struct vhost_poll, table);
+
+ poll->wqh = wqh;
+ add_wait_queue(wqh, &poll->wait);
+}
+
+static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
+ void *key)
+{
+ struct vhost_poll *poll;
+ poll = container_of(wait, struct vhost_poll, wait);
+ if (!((unsigned long)key & poll->mask))
+ return 0;
+
+ queue_work(vhost_workqueue, &poll->work);
+ return 0;
+}
+
+/* Init poll structure */
+void vhost_poll_init(struct vhost_poll *poll, work_func_t func,
+ unsigned long mask)
+{
+ INIT_WORK(&poll->work, func);
+ init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
+ init_poll_funcptr(&poll->table, vhost_poll_func);
+ poll->mask = mask;
+}
+
+/* Start polling a file. We add ourselves to file's wait queue. The caller must
+ * keep a reference to a file until after vhost_poll_stop is called. */
+void vhost_poll_start(struct vhost_poll *poll, struct file *file)
+{
+ unsigned long mask;
+ mask = file->f_op->poll(file, &poll->table);
+ if (mask)
+ vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
+}
+
+/* Stop polling a file. After this function returns, it becomes safe to drop the
+ * file reference. You must also flush afterwards. */
+void vhost_poll_stop(struct vhost_poll *poll)
+{
+ remove_wait_queue(poll->wqh, &poll->wait);
+}
+
+/* Flush any work that has been scheduled. When calling this, don't hold any
+ * locks that are also used by the callback. */
+void vhost_poll_flush(struct vhost_poll *poll)
+{
+ flush_work(&poll->work);
+}
+
+void vhost_poll_queue(struct vhost_poll *poll)
+{
+ queue_work(vhost_workqueue, &poll->work);
+}
+
+static void vhost_vq_reset(struct vhost_dev *dev,
+ struct vhost_virtqueue *vq)
+{
+ vq->num = 1;
+ vq->desc = NULL;
+ vq->avail = NULL;
+ vq->used = NULL;
+ vq->last_avail_idx = 0;
+ vq->avail_idx = 0;
+ vq->last_used_idx = 0;
+ vq->used_flags = 0;
+ vq->log_used = false;
+ vq->log_addr = -1ull;
+ vq->hdr_size = 0;
+ vq->private_data = NULL;
+ vq->log_base = NULL;
+ vq->error_ctx = NULL;
+ vq->error = NULL;
+ vq->kick = NULL;
+ vq->call_ctx = NULL;
+ vq->call = NULL;
+}
+
+long vhost_dev_init(struct vhost_dev *dev,
+ struct vhost_virtqueue *vqs, int nvqs)
+{
+ int i;
+ dev->vqs = vqs;
+ dev->nvqs = nvqs;
+ mutex_init(&dev->mutex);
+ dev->log_ctx = NULL;
+ dev->log_file = NULL;
+ dev->memory = NULL;
+ dev->mm = NULL;
+
+ for (i = 0; i < dev->nvqs; ++i) {
+ dev->vqs[i].dev = dev;
+ mutex_init(&dev->vqs[i].mutex);
+ vhost_vq_reset(dev, dev->vqs + i);
+ if (dev->vqs[i].handle_kick)
+ vhost_poll_init(&dev->vqs[i].poll,
+ dev->vqs[i].handle_kick,
+ POLLIN);
+ }
+ return 0;
+}
+
+/* Caller should have device mutex */
+long vhost_dev_check_owner(struct vhost_dev *dev)
+{
+ /* Are you the owner? If not, I don't think you mean to do that */
+ return dev->mm == current->mm ? 0 : -EPERM;
+}
+
+/* Caller should have device mutex */
+static long vhost_dev_set_owner(struct vhost_dev *dev)
+{
+ /* Is there an owner already? */
+ if (dev->mm)
+ return -EBUSY;
+ /* No owner, become one */
+ dev->mm = get_task_mm(current);
+ return 0;
+}
+
+/* Caller should have device mutex */
+long vhost_dev_reset_owner(struct vhost_dev *dev)
+{
+ struct vhost_memory *memory;
+
+ /* Restore memory to default empty mapping. */
+ memory = kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
+ if (!memory)
+ return -ENOMEM;
+
+ vhost_dev_cleanup(dev);
+
+ memory->nregions = 0;
+ dev->memory = memory;
+ return 0;
+}
+
+/* Caller should have device mutex */
+void vhost_dev_cleanup(struct vhost_dev *dev)
+{
+ int i;
+ for (i = 0; i < dev->nvqs; ++i) {
+ if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
+ vhost_poll_stop(&dev->vqs[i].poll);
+ vhost_poll_flush(&dev->vqs[i].poll);
+ }
+ if (dev->vqs[i].error_ctx)
+ eventfd_ctx_put(dev->vqs[i].error_ctx);
+ if (dev->vqs[i].error)
+ fput(dev->vqs[i].error);
+ if (dev->vqs[i].kick)
+ fput(dev->vqs[i].kick);
+ if (dev->vqs[i].call_ctx)
+ eventfd_ctx_put(dev->vqs[i].call_ctx);
+ if (dev->vqs[i].call)
+ fput(dev->vqs[i].call);
+ vhost_vq_reset(dev, dev->vqs + i);
+ }
+ if (dev->log_ctx)
+ eventfd_ctx_put(dev->log_ctx);
+ dev->log_ctx = NULL;
+ if (dev->log_file)
+ fput(dev->log_file);
+ dev->log_file = NULL;
+ /* No one will access memory at this point */
+ kfree(dev->memory);
+ dev->memory = NULL;
+ if (dev->mm)
+ mmput(dev->mm);
+ dev->mm = NULL;
+}
+
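+/* Check that the userspace dirty-log bitmap is writable for 'sz' bytes of
+ * guest memory starting at 'addr' (one bit per VHOST_PAGE_SIZE page). */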
+static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
+{
+ u64 a = addr / VHOST_PAGE_SIZE / 8;
+ /* Make sure 64 bit math will not overflow. */
+ if (a > ULONG_MAX - (unsigned long)log_base ||
+ a + (unsigned long)log_base > ULONG_MAX)
+ return -EFAULT;
+
+ return access_ok(VERIFY_WRITE, log_base + a,
+ (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
+}
+
+/* Caller should have vq mutex and device mutex. */
+static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
+ int log_all)
+{
+ int i;
+ for (i = 0; i < mem->nregions; ++i) {
+ struct vhost_memory_region *m = mem->regions + i;
+ unsigned long a = m->userspace_addr;
+ if (m->memory_size > ULONG_MAX)
+ return 0;
+ else if (!access_ok(VERIFY_WRITE, (void __user *)a,
+ m->memory_size))
+ return 0;
+ else if (log_all && !log_access_ok(log_base,
+ m->guest_phys_addr,
+ m->memory_size))
+ return 0;
+ }
+ return 1;
+}
+
+/* Can we switch to this memory table? */
+/* Caller should have device mutex but not vq mutex */
+static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
+ int log_all)
+{
+ int i;
+ for (i = 0; i < d->nvqs; ++i) {
+ int ok;
+ mutex_lock(&d->vqs[i].mutex);
+ /* If ring is inactive, will check when it's enabled. */
+ if (d->vqs[i].private_data)
+ ok = vq_memory_access_ok(d->vqs[i].log_base, mem,
+ log_all);
+ else
+ ok = 1;
+ mutex_unlock(&d->vqs[i].mutex);
+ if (!ok)
+ return 0;
+ }
+ return 1;
+}
+
+static int vq_access_ok(unsigned int num,
+ struct vring_desc __user *desc,
+ struct vring_avail __user *avail,
+ struct vring_used __user *used)
+{
+ return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
+ access_ok(VERIFY_READ, avail,
+ sizeof *avail + num * sizeof *avail->ring) &&
+ access_ok(VERIFY_WRITE, used,
+ sizeof *used + num * sizeof *used->ring);
+}
+
+/* Can we log writes? */
+/* Caller should have device mutex but not vq mutex */
+int vhost_log_access_ok(struct vhost_dev *dev)
+{
+ return memory_access_ok(dev, dev->memory, 1);
+}
+
+/* Verify access for write logging. */
+/* Caller should have vq mutex and device mutex */
+static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
+{
+ return vq_memory_access_ok(log_base, vq->dev->memory,
+ vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
+ (!vq->log_used || log_access_ok(log_base, vq->log_addr,
+ sizeof *vq->used +
+ vq->num * sizeof *vq->used->ring));
+}
+
+/* Can we start vq? */
+/* Caller should have vq mutex and device mutex */
+int vhost_vq_access_ok(struct vhost_virtqueue *vq)
+{
+ return vq_access_ok(vq->num, vq->desc, vq->avail, vq->used) &&
+ vq_log_access_ok(vq, vq->log_base);
+}
+
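+/* VHOST_SET_MEM_TABLE: copy in the new region table, validate it against all
+ * active rings, then publish it with RCU and free the old table. */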
+static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
+{
+ struct vhost_memory mem, *newmem, *oldmem;
+ unsigned long size = offsetof(struct vhost_memory, regions);
+ long r;
+ if (copy_from_user(&mem, m, size))
+ return -EFAULT;
+ if (mem.padding)
+ return -EOPNOTSUPP;
+ if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
+ return -E2BIG;
+ newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
+ if (!newmem)
+ return -ENOMEM;
+
+ memcpy(newmem, &mem, size);
+ r = copy_from_user(newmem->regions, m->regions,
+ mem.nregions * sizeof *m->regions);
+ if (r) {
+ kfree(newmem);
+ return -EFAULT;
+ }
+
+ if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) {
+ kfree(newmem);
+ return -EFAULT;
+ }
+ oldmem = d->memory;
+ rcu_assign_pointer(d->memory, newmem);
+ synchronize_rcu();
+ kfree(oldmem);
+ return 0;
+}
+
+static int init_used(struct vhost_virtqueue *vq,
+ struct vring_used __user *used)
+{
+ int r = put_user(vq->used_flags, &used->flags);
+ if (r)
+ return r;
+ return get_user(vq->last_used_idx, &used->idx);
+}
+
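+/* Per-virtqueue ioctls: ring size, base index, ring addresses and the
+ * kick/call/error eventfds. Caller holds the device mutex; takes the vq mutex. */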
+static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
+{
+ struct file *eventfp, *filep = NULL,
+ *pollstart = NULL, *pollstop = NULL;
+ struct eventfd_ctx *ctx = NULL;
+ u32 __user *idxp = argp;
+ struct vhost_virtqueue *vq;
+ struct vhost_vring_state s;
+ struct vhost_vring_file f;
+ struct vhost_vring_addr a;
+ u32 idx;
+ long r;
+
+ r = get_user(idx, idxp);
+ if (r < 0)
+ return r;
+ if (idx >= d->nvqs)
+ return -ENOBUFS;
+
+ vq = d->vqs + idx;
+
+ mutex_lock(&vq->mutex);
+
+ switch (ioctl) {
+ case VHOST_SET_VRING_NUM:
+ /* Resizing ring with an active backend?
+ * You don't want to do that. */
+ if (vq->private_data) {
+ r = -EBUSY;
+ break;
+ }
+ if (copy_from_user(&s, argp, sizeof s)) {
+ r = -EFAULT;
+ break;
+ }
+ if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
+ r = -EINVAL;
+ break;
+ }
+ vq->num = s.num;
+ break;
+ case VHOST_SET_VRING_BASE:
+ /* Moving base with an active backend?
+ * You don't want to do that. */
+ if (vq->private_data) {
+ r = -EBUSY;
+ break;
+ }
+ if (copy_from_user(&s, argp, sizeof s)) {
+ r = -EFAULT;
+ break;
+ }
+ if (s.num > 0xffff) {
+ r = -EINVAL;
+ break;
+ }
+ vq->last_avail_idx = s.num;
+ /* Forget the cached index value. */
+ vq->avail_idx = vq->last_avail_idx;
+ break;
+ case VHOST_GET_VRING_BASE:
+ s.index = idx;
+ s.num = vq->last_avail_idx;
+ if (copy_to_user(argp, &s, sizeof s))
+ r = -EFAULT;
+ break;
+ case VHOST_SET_VRING_ADDR:
+ if (copy_from_user(&a, argp, sizeof a)) {
+ r = -EFAULT;
+ break;
+ }
+ if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
+ r = -EOPNOTSUPP;
+ break;
+ }
+ /* For 32bit, verify that the top 32bits of the user
+ data are set to zero. */
+ if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
+ (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
+ (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
+ r = -EFAULT;
+ break;
+ }
+ if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
+ (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
+ (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
+ r = -EINVAL;
+ break;
+ }
+
+ /* We only verify access here if backend is configured.
+ * If it is not, we don't as size might not have been setup.
+ * We will verify when backend is configured. */
+ if (vq->private_data) {
+ if (!vq_access_ok(vq->num,
+ (void __user *)(unsigned long)a.desc_user_addr,
+ (void __user *)(unsigned long)a.avail_user_addr,
+ (void __user *)(unsigned long)a.used_user_addr)) {
+ r = -EINVAL;
+ break;
+ }
+
+ /* Also validate log access for used ring if enabled. */
+ if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
+ !log_access_ok(vq->log_base, a.log_guest_addr,
+ sizeof *vq->used +
+ vq->num * sizeof *vq->used->ring)) {
+ r = -EINVAL;
+ break;
+ }
+ }
+
+ r = init_used(vq, (struct vring_used __user *)(unsigned long)
+ a.used_user_addr);
+ if (r)
+ break;
+ vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
+ vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
+ vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
+ vq->log_addr = a.log_guest_addr;
+ vq->used = (void __user *)(unsigned long)a.used_user_addr;
+ break;
+ case VHOST_SET_VRING_KICK:
+ if (copy_from_user(&f, argp, sizeof f)) {
+ r = -EFAULT;
+ break;
+ }
+ eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
+ if (IS_ERR(eventfp))
+ return PTR_ERR(eventfp);
+ if (eventfp != vq->kick) {
+ pollstop = filep = vq->kick;
+ pollstart = vq->kick = eventfp;
+ } else
+ filep = eventfp;
+ break;
+ case VHOST_SET_VRING_CALL:
+ if (copy_from_user(&f, argp, sizeof f)) {
+ r = -EFAULT;
+ break;
+ }
+ eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
+ if (IS_ERR(eventfp))
+ return PTR_ERR(eventfp);
+ if (eventfp != vq->call) {
+ filep = vq->call;
+ ctx = vq->call_ctx;
+ vq->call = eventfp;
+ vq->call_ctx = eventfp ?
+ eventfd_ctx_fileget(eventfp) : NULL;
+ } else
+ filep = eventfp;
+ break;
+ case VHOST_SET_VRING_ERR:
+ if (copy_from_user(&f, argp, sizeof f)) {
+ r = -EFAULT;
+ break;
+ }
+ eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
+ if (IS_ERR(eventfp))
+ return PTR_ERR(eventfp);
+ if (eventfp != vq->error) {
+ filep = vq->error;
+ vq->error = eventfp;
+ ctx = vq->error_ctx;
+ vq->error_ctx = eventfp ?
+ eventfd_ctx_fileget(eventfp) : NULL;
+ } else
+ filep = eventfp;
+ break;
+ default:
+ r = -ENOIOCTLCMD;
+ }
+
+ if (pollstop && vq->handle_kick)
+ vhost_poll_stop(&vq->poll);
+
+ if (ctx)
+ eventfd_ctx_put(ctx);
+ if (filep)
+ fput(filep);
+
+ if (pollstart && vq->handle_kick)
+ vhost_poll_start(&vq->poll, vq->kick);
+
+ mutex_unlock(&vq->mutex);
+
+ if (pollstop && vq->handle_kick)
+ vhost_poll_flush(&vq->poll);
+ return r;
+}
+
+/* Caller must have device mutex */
+long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct file *eventfp, *filep = NULL;
+ struct eventfd_ctx *ctx = NULL;
+ u64 p;
+ long r;
+ int i, fd;
+
+ /* If you are not the owner, you can become one */
+ if (ioctl == VHOST_SET_OWNER) {
+ r = vhost_dev_set_owner(d);
+ goto done;
+ }
+
+ /* You must be the owner to do anything else */
+ r = vhost_dev_check_owner(d);
+ if (r)
+ goto done;
+
+ switch (ioctl) {
+ case VHOST_SET_MEM_TABLE:
+ r = vhost_set_memory(d, argp);
+ break;
+ case VHOST_SET_LOG_BASE:
+ if (copy_from_user(&p, argp, sizeof p)) {
+ r = -EFAULT;
+ break;
+ }
+ if ((u64)(unsigned long)p != p) {
+ r = -EFAULT;
+ break;
+ }
+ for (i = 0; i < d->nvqs; ++i) {
+ struct vhost_virtqueue *vq;
+ void __user *base = (void __user *)(unsigned long)p;
+ vq = d->vqs + i;
+ mutex_lock(&vq->mutex);
+ /* If ring is inactive, will check when it's enabled. */
+ if (vq->private_data && !vq_log_access_ok(vq, base))
+ r = -EFAULT;
+ else
+ vq->log_base = base;
+ mutex_unlock(&vq->mutex);
+ }
+ break;
+ case VHOST_SET_LOG_FD:
+ r = get_user(fd, (int __user *)argp);
+ if (r < 0)
+ break;
+ eventfp = fd == -1 ? NULL : eventfd_fget(fd);
+ if (IS_ERR(eventfp)) {
+ r = PTR_ERR(eventfp);
+ break;
+ }
+ if (eventfp != d->log_file) {
+ filep = d->log_file;
+ d->log_file = eventfp;
+ ctx = d->log_ctx;
+ d->log_ctx = eventfp ?
+ eventfd_ctx_fileget(eventfp) : NULL;
+ } else
+ filep = eventfp;
+ for (i = 0; i < d->nvqs; ++i) {
+ mutex_lock(&d->vqs[i].mutex);
+ d->vqs[i].log_ctx = d->log_ctx;
+ mutex_unlock(&d->vqs[i].mutex);
+ }
+ if (ctx)
+ eventfd_ctx_put(ctx);
+ if (filep)
+ fput(filep);
+ break;
+ default:
+ r = vhost_set_vring(d, ioctl, argp);
+ break;
+ }
+done:
+ return r;
+}
+
+static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
+ __u64 addr, __u32 len)
+{
+ struct vhost_memory_region *reg;
+ int i;
+ /* linear search is not brilliant, but we really have on the order of 6
+ * regions in practice */
+ for (i = 0; i < mem->nregions; ++i) {
+ reg = mem->regions + i;
+ if (reg->guest_phys_addr <= addr &&
+ reg->guest_phys_addr + reg->memory_size - 1 >= addr)
+ return reg;
+ }
+ return NULL;
+}
+
+/* TODO: This is really inefficient. We need something like get_user()
+ * (instruction directly accesses the data, with an exception table entry
+ * returning -EFAULT). See Documentation/x86/exception-tables.txt.
+ */
+static int set_bit_to_user(int nr, void __user *addr)
+{
+ unsigned long log = (unsigned long)addr;
+ struct page *page;
+ void *base;
+ int bit = nr + (log % PAGE_SIZE) * 8;
+ int r;
+ r = get_user_pages_fast(log, 1, 1, &page);
+ if (r)
+ return r;
+ base = kmap_atomic(page, KM_USER0);
+ set_bit(bit, base);
+ kunmap_atomic(base, KM_USER0);
+ set_page_dirty_lock(page);
+ put_page(page);
+ return 0;
+}
+
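+/* Mark the pages spanning [write_address, write_address + write_length) as
+ * dirty in the userspace log bitmap. */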
+static int log_write(void __user *log_base,
+ u64 write_address, u64 write_length)
+{
+ int r;
+ if (!write_length)
+ return 0;
+ write_address /= VHOST_PAGE_SIZE;
+ for (;;) {
+ u64 base = (u64)(unsigned long)log_base;
+ u64 log = base + write_address / 8;
+ int bit = write_address % 8;
+ if ((u64)(unsigned long)log != log)
+ return -EFAULT;
+ r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
+ if (r < 0)
+ return r;
+ if (write_length <= VHOST_PAGE_SIZE)
+ break;
+ write_length -= VHOST_PAGE_SIZE;
+ write_address += VHOST_PAGE_SIZE;
+ }
+ return r;
+}
+
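+/* Log a guest-visible write of 'len' bytes using the log[] entries collected
+ * while translating descriptors. Called with the vq mutex held. */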
+int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
+ unsigned int log_num, u64 len)
+{
+ int i, r;
+
+ /* Make sure data written is seen before log. */
+ smp_wmb();
+ for (i = 0; i < log_num; ++i) {
+ u64 l = min(log[i].len, len);
+ r = log_write(vq->log_base, log[i].addr, l);
+ if (r < 0)
+ return r;
+ len -= l;
+ if (!len)
+ return 0;
+ }
+ if (vq->log_ctx)
+ eventfd_signal(vq->log_ctx, 1);
+ /* Length written exceeds what we have stored. This is a bug. */
+ BUG();
+ return 0;
+}
+
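+/* Translate a guest-physical range into userspace iovecs using the current
+ * memory region table; returns the number of iovecs filled or -errno. */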
+int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
+ struct iovec iov[], int iov_size)
+{
+ const struct vhost_memory_region *reg;
+ struct vhost_memory *mem;
+ struct iovec *_iov;
+ u64 s = 0;
+ int ret = 0;
+
+ rcu_read_lock();
+
+ mem = rcu_dereference(dev->memory);
+ while ((u64)len > s) {
+ u64 size;
+ if (ret >= iov_size) {
+ ret = -ENOBUFS;
+ break;
+ }
+ reg = find_region(mem, addr, len);
+ if (!reg) {
+ ret = -EFAULT;
+ break;
+ }
+ _iov = iov + ret;
+ size = reg->memory_size - addr + reg->guest_phys_addr;
+ _iov->iov_len = min((u64)len - s, size);
+ _iov->iov_base = (void *)(unsigned long)
+ (reg->userspace_addr + addr - reg->guest_phys_addr);
+ s += size;
+ addr += size;
+ ++ret;
+ }
+
+ rcu_read_unlock();
+ return ret;
+}
+
+/* Each buffer in the virtqueues is actually a chain of descriptors. This
+ * function returns the next descriptor in the chain,
+ * or -1U if we're at the end. */
+static unsigned next_desc(struct vring_desc *desc)
+{
+ unsigned int next;
+
+ /* If this descriptor says it doesn't chain, we're done. */
+ if (!(desc->flags & VRING_DESC_F_NEXT))
+ return -1U;
+
+ /* Check they're not leading us off the end of the descriptors. */
+ next = desc->next;
+ /* Make sure compiler knows to grab that: we don't want it changing! */
+ /* We will use the result as an index in an array, so most
+ * architectures only need a compiler barrier here. */
+ read_barrier_depends();
+
+ return next;
+}
+
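+/* Walk an indirect descriptor table: translate the table itself, then read
+ * each descriptor and add its buffer to the out/in iovecs. */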
+static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
+ struct iovec iov[], unsigned int iov_size,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num,
+ struct vring_desc *indirect)
+{
+ struct vring_desc desc;
+ unsigned int i = 0, count, found = 0;
+ int ret;
+
+ /* Sanity check */
+ if (indirect->len % sizeof desc) {
+ vq_err(vq, "Invalid length in indirect descriptor: "
+ "len 0x%llx not multiple of 0x%zx\n",
+ (unsigned long long)indirect->len,
+ sizeof desc);
+ return -EINVAL;
+ }
+
+ ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
+ ARRAY_SIZE(vq->indirect));
+ if (ret < 0) {
+ vq_err(vq, "Translation failure %d in indirect.\n", ret);
+ return ret;
+ }
+
+ /* We will use the result as an address to read from, so most
+ * architectures only need a compiler barrier here. */
+ read_barrier_depends();
+
+ count = indirect->len / sizeof desc;
+ /* Buffers are chained via a 16 bit next field, so
+ * we can have at most 2^16 of these. */
+ if (count > USHORT_MAX + 1) {
+ vq_err(vq, "Indirect buffer length too big: %d\n",
+ indirect->len);
+ return -E2BIG;
+ }
+
+ do {
+ unsigned iov_count = *in_num + *out_num;
+ if (++found > count) {
+ vq_err(vq, "Loop detected: last one at %u "
+ "indirect size %u\n",
+ i, count);
+ return -EINVAL;
+ }
+ if (memcpy_fromiovec((unsigned char *)&desc, vq->indirect,
+ sizeof desc)) {
+ vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
+ i, (size_t)indirect->addr + i * sizeof desc);
+ return -EINVAL;
+ }
+ if (desc.flags & VRING_DESC_F_INDIRECT) {
+ vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
+ i, (size_t)indirect->addr + i * sizeof desc);
+ return -EINVAL;
+ }
+
+ ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
+ iov_size - iov_count);
+ if (ret < 0) {
+ vq_err(vq, "Translation failure %d indirect idx %d\n",
+ ret, i);
+ return ret;
+ }
+ /* If this is an input descriptor, increment that count. */
+ if (desc.flags & VRING_DESC_F_WRITE) {
+ *in_num += ret;
+ if (unlikely(log)) {
+ log[*log_num].addr = desc.addr;
+ log[*log_num].len = desc.len;
+ ++*log_num;
+ }
+ } else {
+ /* If it's an output descriptor, they're all supposed
+ * to come before any input descriptors. */
+ if (*in_num) {
+ vq_err(vq, "Indirect descriptor "
+ "has out after in: idx %d\n", i);
+ return -EINVAL;
+ }
+ *out_num += ret;
+ }
+ } while ((i = next_desc(&desc)) != -1);
+ return 0;
+}
+
+/* This looks in the virtqueue for the first available buffer, and converts
+ * it to an iovec for convenient access. Since descriptors consist of some
+ * number of output then some number of input descriptors, it's actually two
+ * iovecs, but we pack them into one and note how many of each there were.
+ *
+ * This function returns the descriptor number found, or vq->num (which
+ * is never a valid descriptor number) if none was found. */
+unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
+ struct iovec iov[], unsigned int iov_size,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num)
+{
+ struct vring_desc desc;
+ unsigned int i, head, found = 0;
+ u16 last_avail_idx;
+ int ret;
+
+ /* Check it isn't doing very strange things with descriptor numbers. */
+ last_avail_idx = vq->last_avail_idx;
+ if (get_user(vq->avail_idx, &vq->avail->idx)) {
+ vq_err(vq, "Failed to access avail idx at %p\n",
+ &vq->avail->idx);
+ return vq->num;
+ }
+
+ if ((u16)(vq->avail_idx - last_avail_idx) > vq->num) {
+ vq_err(vq, "Guest moved used index from %u to %u",
+ last_avail_idx, vq->avail_idx);
+ return vq->num;
+ }
+
+ /* If there's nothing new since last we looked, return invalid. */
+ if (vq->avail_idx == last_avail_idx)
+ return vq->num;
+
+ /* Only get avail ring entries after they have been exposed by guest. */
+ smp_rmb();
+
+ /* Grab the next descriptor number they're advertising, and increment
+ * the index we've seen. */
+ if (get_user(head, &vq->avail->ring[last_avail_idx % vq->num])) {
+ vq_err(vq, "Failed to read head: idx %d address %p\n",
+ last_avail_idx,
+ &vq->avail->ring[last_avail_idx % vq->num]);
+ return vq->num;
+ }
+
+ /* If their number is silly, that's an error. */
+ if (head >= vq->num) {
+ vq_err(vq, "Guest says index %u > %u is available",
+ head, vq->num);
+ return vq->num;
+ }
+
+ /* When we start there are none of either input nor output. */
+ *out_num = *in_num = 0;
+ if (unlikely(log))
+ *log_num = 0;
+
+ i = head;
+ do {
+ unsigned iov_count = *in_num + *out_num;
+ if (i >= vq->num) {
+ vq_err(vq, "Desc index is %u > %u, head = %u",
+ i, vq->num, head);
+ return vq->num;
+ }
+ if (++found > vq->num) {
+ vq_err(vq, "Loop detected: last one at %u "
+ "vq size %u head %u\n",
+ i, vq->num, head);
+ return vq->num;
+ }
+ ret = copy_from_user(&desc, vq->desc + i, sizeof desc);
+ if (ret) {
+ vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
+ i, vq->desc + i);
+ return vq->num;
+ }
+ if (desc.flags & VRING_DESC_F_INDIRECT) {
+ ret = get_indirect(dev, vq, iov, iov_size,
+ out_num, in_num,
+ log, log_num, &desc);
+ if (ret < 0) {
+ vq_err(vq, "Failure detected "
+ "in indirect descriptor at idx %d\n", i);
+ return vq->num;
+ }
+ continue;
+ }
+
+ ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
+ iov_size - iov_count);
+ if (ret < 0) {
+ vq_err(vq, "Translation failure %d descriptor idx %d\n",
+ ret, i);
+ return vq->num;
+ }
+ if (desc.flags & VRING_DESC_F_WRITE) {
+ /* If this is an input descriptor,
+ * increment that count. */
+ *in_num += ret;
+ if (unlikely(log)) {
+ log[*log_num].addr = desc.addr;
+ log[*log_num].len = desc.len;
+ ++*log_num;
+ }
+ } else {
+ /* If it's an output descriptor, they're all supposed
+ * to come before any input descriptors. */
+ if (*in_num) {
+ vq_err(vq, "Descriptor has out after in: "
+ "idx %d\n", i);
+ return vq->num;
+ }
+ *out_num += ret;
+ }
+ } while ((i = next_desc(&desc)) != -1);
+
+ /* On success, increment avail index. */
+ vq->last_avail_idx++;
+ return head;
+}
+
+/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
+void vhost_discard_vq_desc(struct vhost_virtqueue *vq)
+{
+ vq->last_avail_idx--;
+}
+
+/* After we've used one of their buffers, we tell them about it. We'll then
+ * want to notify the guest, using eventfd. */
+int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
+{
+ struct vring_used_elem *used;
+
+ /* The virtqueue contains a ring of used buffers. Get a pointer to the
+ * next entry in that used ring. */
+ used = &vq->used->ring[vq->last_used_idx % vq->num];
+ if (put_user(head, &used->id)) {
+ vq_err(vq, "Failed to write used id");
+ return -EFAULT;
+ }
+ if (put_user(len, &used->len)) {
+ vq_err(vq, "Failed to write used len");
+ return -EFAULT;
+ }
+ /* Make sure buffer is written before we update index. */
+ smp_wmb();
+ if (put_user(vq->last_used_idx + 1, &vq->used->idx)) {
+ vq_err(vq, "Failed to increment used idx");
+ return -EFAULT;
+ }
+ if (unlikely(vq->log_used)) {
+ /* Make sure data is seen before log. */
+ smp_wmb();
+ log_write(vq->log_base, vq->log_addr + sizeof *vq->used->ring *
+ (vq->last_used_idx % vq->num),
+ sizeof *vq->used->ring);
+ log_write(vq->log_base, vq->log_addr, sizeof *vq->used->ring);
+ if (vq->log_ctx)
+ eventfd_signal(vq->log_ctx, 1);
+ }
+ vq->last_used_idx++;
+ return 0;
+}
+
+/* This actually signals the guest, using eventfd. */
+void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+{
+ __u16 flags = 0;
+ if (get_user(flags, &vq->avail->flags)) {
+ vq_err(vq, "Failed to get flags");
+ return;
+ }
+
+ /* If they don't want an interrupt, don't signal, unless empty. */
+ if ((flags & VRING_AVAIL_F_NO_INTERRUPT) &&
+ (vq->avail_idx != vq->last_avail_idx ||
+ !vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY)))
+ return;
+
+ /* Signal the Guest to tell them we used something up. */
+ if (vq->call_ctx)
+ eventfd_signal(vq->call_ctx, 1);
+}
+
+/* And here's the combo meal deal. Supersize me! */
+void vhost_add_used_and_signal(struct vhost_dev *dev,
+ struct vhost_virtqueue *vq,
+ unsigned int head, int len)
+{
+ vhost_add_used(vq, head, len);
+ vhost_signal(dev, vq);
+}
+
+/* OK, now we need to know about added descriptors. */
+bool vhost_enable_notify(struct vhost_virtqueue *vq)
+{
+ u16 avail_idx;
+ int r;
+ if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
+ return false;
+ vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
+ r = put_user(vq->used_flags, &vq->used->flags);
+ if (r) {
+ vq_err(vq, "Failed to enable notification at %p: %d\n",
+ &vq->used->flags, r);
+ return false;
+ }
+ /* They could have slipped one in as we were doing that: make
+ * sure it's written, then check again. */
+ smp_mb();
+ r = get_user(avail_idx, &vq->avail->idx);
+ if (r) {
+ vq_err(vq, "Failed to check avail idx at %p: %d\n",
+ &vq->avail->idx, r);
+ return false;
+ }
+
+ return avail_idx != vq->last_avail_idx;
+}
+
+/* We don't need to be notified again. */
+void vhost_disable_notify(struct vhost_virtqueue *vq)
+{
+ int r;
+ if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
+ return;
+ vq->used_flags |= VRING_USED_F_NO_NOTIFY;
+ r = put_user(vq->used_flags, &vq->used->flags);
+ if (r)
+ vq_err(vq, "Failed to enable notification at %p: %d\n",
+ &vq->used->flags, r);
+}
+
+int vhost_init(void)
+{
+ vhost_workqueue = create_singlethread_workqueue("vhost");
+ if (!vhost_workqueue)
+ return -ENOMEM;
+ return 0;
+}
+
+void vhost_cleanup(void)
+{
+ destroy_workqueue(vhost_workqueue);
+}
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
new file mode 100644
index 000000000000..44591ba9b07a
--- /dev/null
+++ b/drivers/vhost/vhost.h
@@ -0,0 +1,161 @@
+#ifndef _VHOST_H
+#define _VHOST_H
+
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/poll.h>
+#include <linux/file.h>
+#include <linux/skbuff.h>
+#include <linux/uio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ring.h>
+
+struct vhost_device;
+
+enum {
+ /* Enough space for all fragments, head, and virtio net header. */
+ VHOST_NET_MAX_SG = MAX_SKB_FRAGS + 2,
+};
+
+/* Poll a file (eventfd or socket) */
+/* Note: there's nothing vhost specific about this structure. */
+struct vhost_poll {
+ poll_table table;
+ wait_queue_head_t *wqh;
+ wait_queue_t wait;
+ /* struct which will handle all actual work. */
+ struct work_struct work;
+ unsigned long mask;
+};
+
+void vhost_poll_init(struct vhost_poll *poll, work_func_t func,
+ unsigned long mask);
+void vhost_poll_start(struct vhost_poll *poll, struct file *file);
+void vhost_poll_stop(struct vhost_poll *poll);
+void vhost_poll_flush(struct vhost_poll *poll);
+void vhost_poll_queue(struct vhost_poll *poll);
+
+struct vhost_log {
+ u64 addr;
+ u64 len;
+};
+
+/* The virtqueue structure describes a queue attached to a device. */
+struct vhost_virtqueue {
+ struct vhost_dev *dev;
+
+ /* The actual ring of buffers. */
+ struct mutex mutex;
+ unsigned int num;
+ struct vring_desc __user *desc;
+ struct vring_avail __user *avail;
+ struct vring_used __user *used;
+ struct file *kick;
+ struct file *call;
+ struct file *error;
+ struct eventfd_ctx *call_ctx;
+ struct eventfd_ctx *error_ctx;
+ struct eventfd_ctx *log_ctx;
+
+ struct vhost_poll poll;
+
+ /* The routine to call when the Guest pings us, or timeout. */
+ work_func_t handle_kick;
+
+ /* Last available index we saw. */
+ u16 last_avail_idx;
+
+ /* Caches available index value from user. */
+ u16 avail_idx;
+
+ /* Last index we used. */
+ u16 last_used_idx;
+
+ /* Used flags */
+ u16 used_flags;
+
+ /* Log writes to used structure. */
+ bool log_used;
+ u64 log_addr;
+
+ struct iovec indirect[VHOST_NET_MAX_SG];
+ struct iovec iov[VHOST_NET_MAX_SG];
+ struct iovec hdr[VHOST_NET_MAX_SG];
+ size_t hdr_size;
+ /* We use a kind of RCU to access private pointer.
+ * All readers access it from workqueue, which makes it possible to
+ * flush the workqueue instead of synchronize_rcu. Therefore readers do
+ * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
+ * work item execution acts instead of rcu_read_lock() and the end of
+ * work item execution acts instead of rcu_read_unlock().
+ * Writers use virtqueue mutex. */
+ void *private_data;
+ /* Log write descriptors */
+ void __user *log_base;
+ struct vhost_log log[VHOST_NET_MAX_SG];
+};
+
+struct vhost_dev {
+ /* Readers use RCU to access memory table pointer,
+ * log base pointer and features.
+ * Writers use mutex below. */
+ struct vhost_memory *memory;
+ struct mm_struct *mm;
+ struct mutex mutex;
+ unsigned acked_features;
+ struct vhost_virtqueue *vqs;
+ int nvqs;
+ struct file *log_file;
+ struct eventfd_ctx *log_ctx;
+};
+
+long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
+long vhost_dev_check_owner(struct vhost_dev *);
+long vhost_dev_reset_owner(struct vhost_dev *);
+void vhost_dev_cleanup(struct vhost_dev *);
+long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, unsigned long arg);
+int vhost_vq_access_ok(struct vhost_virtqueue *vq);
+int vhost_log_access_ok(struct vhost_dev *);
+
+unsigned vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *,
+ struct iovec iov[], unsigned int iov_count,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num);
+void vhost_discard_vq_desc(struct vhost_virtqueue *);
+
+int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
+void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
+void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
+ unsigned int head, int len);
+void vhost_disable_notify(struct vhost_virtqueue *);
+bool vhost_enable_notify(struct vhost_virtqueue *);
+
+int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
+ unsigned int log_num, u64 len);
+
+int vhost_init(void);
+void vhost_cleanup(void);
+
+#define vq_err(vq, fmt, ...) do { \
+ pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
+ if ((vq)->error_ctx) \
+ eventfd_signal((vq)->error_ctx, 1);\
+ } while (0)
+
+enum {
+ VHOST_FEATURES = (1 << VIRTIO_F_NOTIFY_ON_EMPTY) |
+ (1 << VIRTIO_RING_F_INDIRECT_DESC) |
+ (1 << VHOST_F_LOG_ALL) |
+ (1 << VHOST_NET_F_VIRTIO_NET_HDR),
+};
+
+static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
+{
+ unsigned acked_features = rcu_dereference(dev->acked_features);
+ return acked_features & (1 << bit);
+}
+
+#endif
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index fbd2ecde93e4..71929ee00d69 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -334,6 +334,30 @@ static bool vring_enable_cb(struct virtqueue *_vq)
return true;
}
+static void *vring_detach_unused_buf(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ unsigned int i;
+ void *buf;
+
+ START_USE(vq);
+
+ for (i = 0; i < vq->vring.num; i++) {
+ if (!vq->data[i])
+ continue;
+ /* detach_buf clears data, so grab it now. */
+ buf = vq->data[i];
+ detach_buf(vq, i);
+ END_USE(vq);
+ return buf;
+ }
+ /* That should have freed everything. */
+ BUG_ON(vq->num_free != vq->vring.num);
+
+ END_USE(vq);
+ return NULL;
+}
+
irqreturn_t vring_interrupt(int irq, void *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -360,6 +384,7 @@ static struct virtqueue_ops vring_vq_ops = {
.kick = vring_kick,
.disable_cb = vring_disable_cb,
.enable_cb = vring_enable_cb,
+ .detach_unused_buf = vring_detach_unused_buf,
};
struct virtqueue *vring_new_virtqueue(unsigned int num,