author    Patrick McHardy <kaber@trash.net>  2010-02-10 14:17:10 +0100
committer Patrick McHardy <kaber@trash.net>  2010-02-10 14:17:10 +0100
commit    9ab99d5a43e9f283738fd9fd365539306d13eaac (patch)
tree      0214a63e3f4f7f4f187f0139e4a5d8abe453902b /drivers/net
parent    netfilter: fix build failure with CONNTRACK=y NAT=n (diff)
parent    Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6 (diff)
Merge branch 'master' of /repos/git/net-next-2.6
Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/3c507.c4
-rw-r--r--drivers/net/3c59x.c2
-rw-r--r--drivers/net/8139cp.c9
-rw-r--r--drivers/net/8139too.c9
-rw-r--r--drivers/net/Kconfig55
-rw-r--r--drivers/net/Makefile3
-rw-r--r--drivers/net/acenic.c2
-rw-r--r--drivers/net/amd8111e.c5
-rw-r--r--drivers/net/arcnet/com20020-pci.c2
-rw-r--r--drivers/net/ariadne.c2
-rw-r--r--drivers/net/arm/Kconfig1
-rw-r--r--drivers/net/arm/ep93xx_eth.c140
-rw-r--r--drivers/net/atarilance.c2
-rw-r--r--drivers/net/atl1c/atl1c_main.c9
-rw-r--r--drivers/net/atl1e/atl1e_main.c7
-rw-r--r--drivers/net/atlx/atl1.c2
-rw-r--r--drivers/net/atlx/atl2.c9
-rw-r--r--drivers/net/ax88796.c2
-rw-r--r--drivers/net/b44.c2
-rw-r--r--drivers/net/benet/be.h6
-rw-r--r--drivers/net/benet/be_cmds.c68
-rw-r--r--drivers/net/benet/be_cmds.h36
-rw-r--r--drivers/net/benet/be_ethtool.c140
-rw-r--r--drivers/net/benet/be_main.c35
-rw-r--r--drivers/net/bfin_mac.c5
-rw-r--r--drivers/net/bnx2.c133
-rw-r--r--drivers/net/bnx2.h1
-rw-r--r--drivers/net/bnx2x_main.c8
-rw-r--r--drivers/net/bonding/bond_3ad.c2
-rw-r--r--drivers/net/bonding/bond_main.c16
-rw-r--r--drivers/net/bonding/bonding.h1
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/bfin_can.c4
-rw-r--r--drivers/net/can/dev.c2
-rw-r--r--drivers/net/can/mcp251x.c428
-rw-r--r--drivers/net/can/mscan/Kconfig7
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c248
-rw-r--r--drivers/net/can/mscan/mscan.c58
-rw-r--r--drivers/net/can/mscan/mscan.h86
-rw-r--r--drivers/net/can/sja1000/Kconfig12
-rw-r--r--drivers/net/can/sja1000/Makefile1
-rw-r--r--drivers/net/can/sja1000/ems_pci.c2
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c2
-rw-r--r--drivers/net/can/sja1000/plx_pci.c472
-rw-r--r--drivers/net/can/sja1000/sja1000.c4
-rw-r--r--drivers/net/can/ti_hecc.c5
-rw-r--r--drivers/net/can/usb/Kconfig2
-rw-r--r--drivers/net/can/usb/ems_usb.c6
-rw-r--r--drivers/net/can/vcan.c12
-rw-r--r--drivers/net/cassini.c4
-rw-r--r--drivers/net/chelsio/common.h2
-rw-r--r--drivers/net/chelsio/subr.c2
-rw-r--r--drivers/net/cs89x0.c3
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/cxgb3/sge.c36
-rw-r--r--drivers/net/davinci_emac.c7
-rw-r--r--drivers/net/defxx.c9
-rw-r--r--drivers/net/dl2k.h2
-rw-r--r--drivers/net/e100.c2
-rw-r--r--drivers/net/e1000/e1000.h3
-rw-r--r--drivers/net/e1000/e1000_ethtool.c19
-rw-r--r--drivers/net/e1000/e1000_main.c55
-rw-r--r--drivers/net/e1000e/82571.c70
-rw-r--r--drivers/net/e1000e/defines.h2
-rw-r--r--drivers/net/e1000e/e1000.h21
-rw-r--r--drivers/net/e1000e/es2lan.c34
-rw-r--r--drivers/net/e1000e/ethtool.c2
-rw-r--r--drivers/net/e1000e/hw.h13
-rw-r--r--drivers/net/e1000e/ich8lan.c79
-rw-r--r--drivers/net/e1000e/lib.c282
-rw-r--r--drivers/net/e1000e/netdev.c132
-rw-r--r--drivers/net/e1000e/phy.c85
-rw-r--r--drivers/net/enic/enic_main.c2
-rw-r--r--drivers/net/epic100.c2
-rw-r--r--drivers/net/ethoc.c8
-rw-r--r--drivers/net/fealnx.c2
-rw-r--r--drivers/net/forcedeth.c2
-rw-r--r--drivers/net/fsl_pq_mdio.c30
-rw-r--r--drivers/net/gianfar.c13
-rw-r--r--drivers/net/hamachi.c2
-rw-r--r--drivers/net/hamradio/bpqether.c4
-rw-r--r--drivers/net/hp100.c2
-rw-r--r--drivers/net/ibmlana.c3
-rw-r--r--drivers/net/igb/e1000_82575.c4
-rw-r--r--drivers/net/igb/e1000_phy.c9
-rw-r--r--drivers/net/igb/igb_ethtool.c2
-rw-r--r--drivers/net/igb/igb_main.c51
-rw-r--r--drivers/net/igbvf/netdev.c20
-rw-r--r--drivers/net/ioc3-eth.c2
-rw-r--r--drivers/net/ipg.c2
-rw-r--r--drivers/net/irda/Kconfig10
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/sh_sir.c823
-rw-r--r--drivers/net/irda/via-ircc.c2
-rw-r--r--drivers/net/irda/vlsi_ir.c2
-rw-r--r--drivers/net/ixgb/ixgb_main.c12
-rw-r--r--drivers/net/ixgbe/Makefile5
-rw-r--r--drivers/net/ixgbe/ixgbe.h52
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c134
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c21
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c18
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c84
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c6
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c629
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.c479
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.h96
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c362
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.h47
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h59
-rw-r--r--drivers/net/ixgbevf/Makefile38
-rw-r--r--drivers/net/ixgbevf/defines.h292
-rw-r--r--drivers/net/ixgbevf/ethtool.c716
-rw-r--r--drivers/net/ixgbevf/ixgbevf.h318
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c3578
-rw-r--r--drivers/net/ixgbevf/mbx.c341
-rw-r--r--drivers/net/ixgbevf/mbx.h100
-rw-r--r--drivers/net/ixgbevf/regs.h85
-rw-r--r--drivers/net/ixgbevf/vf.c387
-rw-r--r--drivers/net/ixgbevf/vf.h168
-rw-r--r--drivers/net/jme.c2
-rw-r--r--drivers/net/ks8851_mll.c4
-rw-r--r--drivers/net/ll_temac_main.c2
-rw-r--r--drivers/net/mac8390.c632
-rw-r--r--drivers/net/macvlan.c117
-rw-r--r--drivers/net/macvtap.c581
-rw-r--r--drivers/net/meth.c3
-rw-r--r--drivers/net/mlx4/main.c4
-rw-r--r--drivers/net/mv643xx_eth.c9
-rw-r--r--drivers/net/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/natsemi.c2
-rw-r--r--drivers/net/ne2k-pci.c2
-rw-r--r--drivers/net/netxen/Makefile2
-rw-r--r--drivers/net/netxen/netxen_nic.h10
-rw-r--r--drivers/net/netxen/netxen_nic_ctx.c2
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c195
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h2
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c25
-rw-r--r--drivers/net/netxen/netxen_nic_hw.h2
-rw-r--r--drivers/net/netxen/netxen_nic_init.c6
-rw-r--r--drivers/net/netxen/netxen_nic_main.c22
-rw-r--r--drivers/net/niu.c8
-rw-r--r--drivers/net/ns83820.c2
-rw-r--r--drivers/net/pasemi_mac.c2
-rw-r--r--drivers/net/pci-skeleton.c2
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c1
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c1
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c4
-rw-r--r--drivers/net/pcnet32.c5
-rw-r--r--drivers/net/phy/broadcom.c4
-rw-r--r--drivers/net/phy/marvell.c38
-rw-r--r--drivers/net/phy/mdio_bus.c72
-rw-r--r--drivers/net/phy/phy.c4
-rw-r--r--drivers/net/phy/phy_device.c47
-rw-r--r--drivers/net/phy/smsc.c21
-rw-r--r--drivers/net/ppp_generic.c122
-rw-r--r--drivers/net/ps3_gelic_wireless.c149
-rw-r--r--drivers/net/qla3xxx.c3
-rw-r--r--drivers/net/qlcnic/Makefile8
-rw-r--r--drivers/net/qlcnic/qlcnic.h1126
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c534
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c1015
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h937
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c1275
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c1541
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c2720
-rw-r--r--drivers/net/qlge/qlge.h436
-rw-r--r--drivers/net/qlge/qlge_dbg.c1171
-rw-r--r--drivers/net/qlge/qlge_main.c427
-rw-r--r--drivers/net/qlge/qlge_mpi.c165
-rw-r--r--drivers/net/r6040.c2
-rw-r--r--drivers/net/r8169.c2
-rw-r--r--drivers/net/rrunner.c4
-rw-r--r--drivers/net/s2io.c4
-rw-r--r--drivers/net/sc92031.c2
-rw-r--r--drivers/net/sfc/efx.c8
-rw-r--r--drivers/net/sfc/efx.h2
-rw-r--r--drivers/net/sfc/ethtool.c10
-rw-r--r--drivers/net/sfc/falcon.c7
-rw-r--r--drivers/net/sfc/falcon_xmac.c38
-rw-r--r--drivers/net/sfc/mcdi.c121
-rw-r--r--drivers/net/sfc/mcdi.h2
-rw-r--r--drivers/net/sfc/mcdi_pcol.h206
-rw-r--r--drivers/net/sfc/mcdi_phy.c125
-rw-r--r--drivers/net/sfc/mdio_10g.c24
-rw-r--r--drivers/net/sfc/mdio_10g.h3
-rw-r--r--drivers/net/sfc/mtd.c5
-rw-r--r--drivers/net/sfc/net_driver.h18
-rw-r--r--drivers/net/sfc/nic.c15
-rw-r--r--drivers/net/sfc/qt202x_phy.c245
-rw-r--r--drivers/net/sfc/selftest.c38
-rw-r--r--drivers/net/sfc/selftest.h4
-rw-r--r--drivers/net/sfc/siena.c17
-rw-r--r--drivers/net/sfc/tenxpress.c140
-rw-r--r--drivers/net/sfc/tx.c4
-rw-r--r--drivers/net/sh_eth.c2
-rw-r--r--drivers/net/sis190.c2
-rw-r--r--drivers/net/sis900.c2
-rw-r--r--drivers/net/skfp/skfddi.c21
-rw-r--r--drivers/net/skge.c2
-rw-r--r--drivers/net/sky2.c219
-rw-r--r--drivers/net/sky2.h2
-rw-r--r--drivers/net/smsc9420.c2
-rw-r--r--drivers/net/spider_net.c2
-rw-r--r--drivers/net/starfire.c7
-rw-r--r--drivers/net/stmmac/Kconfig8
-rw-r--r--drivers/net/stmmac/Makefile5
-rw-r--r--drivers/net/stmmac/common.h279
-rw-r--r--drivers/net/stmmac/descs.h4
-rw-r--r--drivers/net/stmmac/dwmac100.c (renamed from drivers/net/stmmac/mac100.c)204
-rw-r--r--drivers/net/stmmac/dwmac100.h (renamed from drivers/net/stmmac/mac100.h)0
-rw-r--r--drivers/net/stmmac/dwmac1000.h (renamed from drivers/net/stmmac/gmac.h)18
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c245
-rw-r--r--drivers/net/stmmac/dwmac1000_dma.c (renamed from drivers/net/stmmac/gmac.c)351
-rw-r--r--drivers/net/stmmac/dwmac_dma.h107
-rw-r--r--drivers/net/stmmac/dwmac_lib.c263
-rw-r--r--drivers/net/stmmac/stmmac.h28
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c11
-rw-r--r--drivers/net/stmmac/stmmac_main.c436
-rw-r--r--drivers/net/stmmac/stmmac_mdio.c11
-rw-r--r--drivers/net/sundance.c2
-rw-r--r--drivers/net/sungem.c2
-rw-r--r--drivers/net/sunhme.c2
-rw-r--r--drivers/net/tc35815.c2
-rw-r--r--drivers/net/tehuti.c2
-rw-r--r--drivers/net/tg3.c119
-rw-r--r--drivers/net/tg3.h28
-rw-r--r--drivers/net/tlan.c30
-rw-r--r--drivers/net/tlan.h3
-rw-r--r--drivers/net/tokenring/3c359.c2
-rw-r--r--drivers/net/tokenring/abyss.c2
-rw-r--r--drivers/net/tokenring/lanstreamer.c2
-rw-r--r--drivers/net/tokenring/olympic.c2
-rw-r--r--drivers/net/tokenring/tmspci.c2
-rw-r--r--drivers/net/tulip/21142.c76
-rw-r--r--drivers/net/tulip/Kconfig4
-rw-r--r--drivers/net/tulip/de2104x.c140
-rw-r--r--drivers/net/tulip/dmfe.c86
-rw-r--r--drivers/net/tulip/eeprom.c47
-rw-r--r--drivers/net/tulip/interrupt.c100
-rw-r--r--drivers/net/tulip/media.c74
-rw-r--r--drivers/net/tulip/pnic.c33
-rw-r--r--drivers/net/tulip/pnic2.c59
-rw-r--r--drivers/net/tulip/timer.c52
-rw-r--r--drivers/net/tulip/tulip_core.c189
-rw-r--r--drivers/net/tulip/uli526x.c48
-rw-r--r--drivers/net/tulip/winbond-840.c179
-rw-r--r--drivers/net/tulip/xircom_cb.c46
-rw-r--r--drivers/net/tun.c107
-rw-r--r--drivers/net/typhoon.c2
-rw-r--r--drivers/net/ucc_geth.c71
-rw-r--r--drivers/net/ucc_geth.h13
-rw-r--r--drivers/net/usb/cdc_ether.c2
-rw-r--r--drivers/net/usb/dm9601.c10
-rw-r--r--drivers/net/usb/hso.c105
-rw-r--r--drivers/net/usb/mcs7830.c246
-rw-r--r--drivers/net/usb/rtl8150.c4
-rw-r--r--drivers/net/via-rhine.c43
-rw-r--r--drivers/net/via-velocity.c10
-rw-r--r--drivers/net/virtio_net.c442
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/vxge/vxge-main.c16
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wan/pc300_drv.c2
-rw-r--r--drivers/net/wan/pc300too.c2
-rw-r--r--drivers/net/wan/pci200syn.c2
-rw-r--r--drivers/net/wan/wanxl.c2
-rw-r--r--drivers/net/wimax/i2400m/driver.c17
-rw-r--r--drivers/net/wimax/i2400m/fw.c11
-rw-r--r--drivers/net/wimax/i2400m/i2400m-usb.h2
-rw-r--r--drivers/net/wimax/i2400m/usb.c12
-rw-r--r--drivers/net/wireless/adm8211.c2
-rw-r--r--drivers/net/wireless/airo.c36
-rw-r--r--drivers/net/wireless/ath/ath.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h26
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c46
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c32
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h8
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c121
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c25
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c31
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h84
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c161
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h32
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c442
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c124
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c862
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h34
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c1344
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c75
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c57
-rw-r--r--drivers/net/wireless/ath/debug.h8
-rw-r--r--drivers/net/wireless/atmel_pci.c2
-rw-r--r--drivers/net/wireless/b43/b43.h8
-rw-r--r--drivers/net/wireless/b43/main.c36
-rw-r--r--drivers/net/wireless/b43/phy_common.c45
-rw-r--r--drivers/net/wireless/b43/phy_common.h10
-rw-r--r--drivers/net/wireless/b43/phy_lp.c76
-rw-r--r--drivers/net/wireless/b43/phy_n.c2757
-rw-r--r--drivers/net/wireless/b43/phy_n.h94
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c744
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h100
-rw-r--r--drivers/net/wireless/b43legacy/main.c11
-rw-r--r--drivers/net/wireless/hostap/hostap_pci.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_plx.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig14
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c78
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-fh.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c92
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c28
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c91
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h61
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c194
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h62
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c1312
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h48
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c104
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c192
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.c198
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c154
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c57
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c4
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h2
-rw-r--r--drivers/net/wireless/libertas/assoc.c78
-rw-r--r--drivers/net/wireless/libertas/mesh.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c8
-rw-r--r--drivers/net/wireless/mwl8k.c1367
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/p54/p54pci.c84
-rw-r--r--drivers/net/wireless/p54/p54pci.h6
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c29
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c26
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c22
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c11
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c11
-rw-r--r--drivers/net/wireless/wl12xx/wl1251.h3
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_cmd.c83
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_cmd.h22
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c314
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_reg.h99
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c13
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c1
-rw-r--r--drivers/net/yellowfin.c2
417 files changed, 35537 insertions(+), 8333 deletions(-)
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index fbc231153e55..77cf0901a441 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -56,6 +56,7 @@ static const char version[] =
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/init.h>
@@ -734,8 +735,7 @@ static void init_82586_mem(struct net_device *dev)
memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10);
/* Fill in the station address. */
- memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr,
- sizeof(dev->dev_addr));
+ memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, ETH_ALEN);
/* The Tx-block list is written as needed. We just set up the values. */
lp->tx_cmd_link = IDLELOOP + 4;
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 39db0e96815d..5df46c230b07 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -375,7 +375,7 @@ static struct vortex_chip_info {
};
-static struct pci_device_id vortex_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(vortex_pci_tbl) = {
{ 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
{ 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
{ 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 3f452bcbfb9e..60bc0b0ad4f3 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -394,7 +394,7 @@ static int cp_get_eeprom(struct net_device *dev,
static int cp_set_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *data);
-static struct pci_device_id cp_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
{ },
@@ -899,7 +899,7 @@ static void __cp_set_rx_mode (struct net_device *dev)
{
struct cp_private *cp = netdev_priv(dev);
u32 mc_filter[2]; /* Multicast hash filter */
- int i, rx_mode;
+ int rx_mode;
u32 tmp;
/* Note: do not reorder, GCC is clever about common statements. */
@@ -909,7 +909,7 @@ static void __cp_set_rx_mode (struct net_device *dev)
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
@@ -918,8 +918,7 @@ static void __cp_set_rx_mode (struct net_device *dev)
struct dev_mc_list *mclist;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 25f7339daabd..c7d6f094cc7a 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -231,7 +231,7 @@ static const struct {
};
-static struct pci_device_id rtl8139_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rtl8139_pci_tbl) = {
{0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
@@ -2509,7 +2509,7 @@ static void __set_rx_mode (struct net_device *dev)
struct rtl8139_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
u32 mc_filter[2]; /* Multicast hash filter */
- int i, rx_mode;
+ int rx_mode;
u32 tmp;
pr_debug("%s: rtl8139_set_rx_mode(%4.4x) done -- Rx config %8.8lx.\n",
@@ -2521,7 +2521,7 @@ static void __set_rx_mode (struct net_device *dev)
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
@@ -2530,8 +2530,7 @@ static void __set_rx_mode (struct net_device *dev)
struct dev_mc_list *mclist;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index dd9a09c72dff..ef662e15c3d3 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -90,6 +90,18 @@ config MACVLAN
To compile this driver as a module, choose M here: the module
will be called macvlan.
+config MACVTAP
+ tristate "MAC-VLAN based tap driver (EXPERIMENTAL)"
+ depends on MACVLAN
+ help
+ This adds a specialized tap character device driver that is based
+ on the MAC-VLAN network interface, called macvtap. A macvtap device
+ can be added in the same way as a macvlan device, using 'type
+ macvlan', and then be accessed through the tap user space interface.
+
+ To compile this driver as a module, choose M here: the module
+ will be called macvtap.
+
config EQUALIZER
tristate "EQL (serial line load balancing) support"
---help---
@@ -2356,20 +2368,6 @@ config GELIC_WIRELESS
the driver automatically distinguishes the models, you can
safely enable this option even if you have a wireless-less model.
-config GELIC_WIRELESS_OLD_PSK_INTERFACE
- bool "PS3 Wireless private PSK interface (OBSOLETE)"
- depends on GELIC_WIRELESS
- select WEXT_PRIV
- help
- This option retains the obsolete private interface to pass
- the PSK from user space programs to the driver. The PSK
- stands for 'Pre Shared Key' and is used for WPA[2]-PSK
- (WPA-Personal) environment.
- If WPA[2]-PSK is used and you need to use old programs that
- support only this old interface, say Y. Otherwise N.
-
- If unsure, say N.
-
config FSL_PQ_MDIO
tristate "Freescale PQ MDIO"
depends on FSL_SOC
@@ -2618,6 +2616,28 @@ config IXGBE_DCB
If unsure, say N.
+config IXGBEVF
+ tristate "Intel(R) 82599 Virtual Function Ethernet support"
+ depends on PCI_MSI
+ ---help---
+ This driver supports Intel(R) 82599 virtual functions. For more
+ information on how to identify your adapter, go to the Adapter &
+ Driver ID Guide at:
+
+ <http://support.intel.com/support/network/sb/CS-008441.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/ixgbevf.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ixgbevf. MSI-X interrupt support is required
+ for this driver to work correctly.
+
config IXGB
tristate "Intel(R) PRO/10GbE support"
depends on PCI
@@ -2756,6 +2776,13 @@ config BNX2X
To compile this driver as a module, choose M here: the module
will be called bnx2x. This is recommended.
+config QLCNIC
+ tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support"
+ depends on PCI
+ help
+ This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet
+ devices.
+
config QLGE
tristate "QLogic QLGE 10Gb Ethernet Driver Support"
depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index ad1346dd9da9..95958032cd31 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/
obj-$(CONFIG_IGB) += igb/
obj-$(CONFIG_IGBVF) += igbvf/
obj-$(CONFIG_IXGBE) += ixgbe/
+obj-$(CONFIG_IXGBEVF) += ixgbevf/
obj-$(CONFIG_IXGB) += ixgb/
obj-$(CONFIG_IP1000) += ipg.o
obj-$(CONFIG_CHELSIO_T1) += chelsio/
@@ -148,6 +149,7 @@ ll_temac-objs := ll_temac_main.o ll_temac_mdio.o
obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o
obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o
obj-$(CONFIG_QLA3XXX) += qla3xxx.o
+obj-$(CONFIG_QLCNIC) += qlcnic/
obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_PPP) += ppp_generic.o
@@ -167,6 +169,7 @@ obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_MACVLAN) += macvlan.o
+obj-$(CONFIG_MACVTAP) += macvtap.o
obj-$(CONFIG_DE600) += de600.o
obj-$(CONFIG_DE620) += de620.o
obj-$(CONFIG_LANCE) += lance.o
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index d82a9a994753..ec624ab03e88 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -134,7 +134,7 @@
#define PCI_DEVICE_ID_SGI_ACENIC 0x0009
#endif
-static struct pci_device_id acenic_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(acenic_pci_tbl) = {
{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 766aabfdfc75..bb27b27d9672 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -113,7 +113,7 @@ MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0
module_param_array(dynamic_ipg, bool, NULL, 0);
MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
-static struct pci_device_id amd8111e_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = {
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
@@ -1176,8 +1176,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
/* Schedule a polling routine */
__napi_schedule(&lp->napi);
} else if (intren0 & RINTEN0) {
- printk("************Driver bug! \
- interrupt while in poll\n");
+ printk("************Driver bug! interrupt while in poll\n");
/* Fix by disable receive interrupts */
writel(RINTEN0, mmio + INTEN0);
}
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index dbf4de39754d..b68e1eb405ff 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -144,7 +144,7 @@ static void __devexit com20020pci_remove(struct pci_dev *pdev)
free_netdev(dev);
}
-static struct pci_device_id com20020pci_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(com20020pci_id_table) = {
{ 0x1571, 0xa001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1571, 0xa002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1571, 0xa003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index c35af3e106b1..e2c202493fa7 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -123,9 +123,7 @@ static void ariadne_reset(struct net_device *dev);
static irqreturn_t ariadne_interrupt(int irq, void *data);
static int ariadne_close(struct net_device *dev);
static struct net_device_stats *ariadne_get_stats(struct net_device *dev);
-#ifdef HAVE_MULTICAST
static void set_multicast_list(struct net_device *dev);
-#endif
static void memcpyw(volatile u_short *dest, u_short *src, int len)
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index c37ee9e6b67b..39e1c0d39476 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -68,6 +68,7 @@ config W90P910_ETH
tristate "Nuvoton w90p910 Ethernet support"
depends on ARM && ARCH_W90X900
select PHYLIB
+ select MII
help
Say Y here if you want to use built-in Ethernet ports
on w90p910 processor.
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index b25467ac895c..bf72d57a0afd 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -9,6 +9,8 @@
* (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -20,9 +22,9 @@
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
-#include <mach/ep93xx-regs.h>
-#include <mach/platform.h>
-#include <asm/io.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
#define DRV_MODULE_NAME "ep93xx-eth"
#define DRV_MODULE_VERSION "0.1"
@@ -185,7 +187,47 @@ struct ep93xx_priv
#define wrw(ep, off, val) __raw_writew((val), (ep)->base_addr + (off))
#define wrl(ep, off, val) __raw_writel((val), (ep)->base_addr + (off))
-static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg);
+static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ int data;
+ int i;
+
+ wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);
+
+ for (i = 0; i < 10; i++) {
+ if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+
+ if (i == 10) {
+ pr_info("mdio read timed out\n");
+ data = 0xffff;
+ } else {
+ data = rdl(ep, REG_MIIDATA);
+ }
+
+ return data;
+}
+
+static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ int i;
+
+ wrl(ep, REG_MIIDATA, data);
+ wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);
+
+ for (i = 0; i < 10; i++) {
+ if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+
+ if (i == 10)
+ pr_info("mdio write timed out\n");
+}
static struct net_device_stats *ep93xx_get_stats(struct net_device *dev)
{
@@ -217,14 +259,11 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
rstat->rstat1 = 0;
if (!(rstat0 & RSTAT0_EOF))
- printk(KERN_CRIT "ep93xx_rx: not end-of-frame "
- " %.8x %.8x\n", rstat0, rstat1);
+ pr_crit("not end-of-frame %.8x %.8x\n", rstat0, rstat1);
if (!(rstat0 & RSTAT0_EOB))
- printk(KERN_CRIT "ep93xx_rx: not end-of-buffer "
- " %.8x %.8x\n", rstat0, rstat1);
+ pr_crit("not end-of-buffer %.8x %.8x\n", rstat0, rstat1);
if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry)
- printk(KERN_CRIT "ep93xx_rx: entry mismatch "
- " %.8x %.8x\n", rstat0, rstat1);
+ pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1);
if (!(rstat0 & RSTAT0_RWE)) {
ep->stats.rx_errors++;
@@ -241,8 +280,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
length = rstat1 & RSTAT1_FRAME_LENGTH;
if (length > MAX_PKT_SIZE) {
- printk(KERN_NOTICE "ep93xx_rx: invalid length "
- " %.8x %.8x\n", rstat0, rstat1);
+ pr_notice("invalid length %.8x %.8x\n", rstat0, rstat1);
goto err;
}
@@ -371,11 +409,9 @@ static void ep93xx_tx_complete(struct net_device *dev)
tstat->tstat0 = 0;
if (tstat0 & TSTAT0_FA)
- printk(KERN_CRIT "ep93xx_tx_complete: frame aborted "
- " %.8x\n", tstat0);
+ pr_crit("frame aborted %.8x\n", tstat0);
if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry)
- printk(KERN_CRIT "ep93xx_tx_complete: entry mismatch "
- " %.8x\n", tstat0);
+ pr_crit("entry mismatch %.8x\n", tstat0);
if (tstat0 & TSTAT0_TXWE) {
int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;
@@ -536,7 +572,7 @@ static int ep93xx_start_hw(struct net_device *dev)
}
if (i == 10) {
- printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to reset\n");
+ pr_crit("hw failed to reset\n");
return 1;
}
@@ -581,7 +617,7 @@ static int ep93xx_start_hw(struct net_device *dev)
}
if (i == 10) {
- printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to start\n");
+ pr_crit("hw failed to start\n");
return 1;
}
@@ -617,7 +653,7 @@ static void ep93xx_stop_hw(struct net_device *dev)
}
if (i == 10)
- printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to reset\n");
+ pr_crit("hw failed to reset\n");
}
static int ep93xx_open(struct net_device *dev)
@@ -681,48 +717,6 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return generic_mii_ioctl(&ep->mii, data, cmd, NULL);
}
-static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
-{
- struct ep93xx_priv *ep = netdev_priv(dev);
- int data;
- int i;
-
- wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);
-
- for (i = 0; i < 10; i++) {
- if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
- break;
- msleep(1);
- }
-
- if (i == 10) {
- printk(KERN_INFO DRV_MODULE_NAME ": mdio read timed out\n");
- data = 0xffff;
- } else {
- data = rdl(ep, REG_MIIDATA);
- }
-
- return data;
-}
-
-static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
-{
- struct ep93xx_priv *ep = netdev_priv(dev);
- int i;
-
- wrl(ep, REG_MIIDATA, data);
- wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);
-
- for (i = 0; i < 10; i++) {
- if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
- break;
- msleep(1);
- }
-
- if (i == 10)
- printk(KERN_INFO DRV_MODULE_NAME ": mdio write timed out\n");
-}
-
static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strcpy(info->driver, DRV_MODULE_NAME);
@@ -825,12 +819,19 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
struct ep93xx_eth_data *data;
struct net_device *dev;
struct ep93xx_priv *ep;
+ struct resource *mem;
+ int irq;
int err;
if (pdev == NULL)
return -ENODEV;
data = pdev->dev.platform_data;
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!mem || irq < 0)
+ return -ENXIO;
+
dev = ep93xx_dev_alloc(data);
if (dev == NULL) {
err = -ENOMEM;
@@ -842,23 +843,21 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
- ep->res = request_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1,
- dev_name(&pdev->dev));
+ ep->res = request_mem_region(mem->start, resource_size(mem),
+ dev_name(&pdev->dev));
if (ep->res == NULL) {
dev_err(&pdev->dev, "Could not reserve memory region\n");
err = -ENOMEM;
goto err_out;
}
- ep->base_addr = ioremap(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start);
+ ep->base_addr = ioremap(mem->start, resource_size(mem));
if (ep->base_addr == NULL) {
dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
err = -EIO;
goto err_out;
}
- ep->irq = pdev->resource[1].start;
+ ep->irq = irq;
ep->mii.phy_id = data->phy_id;
ep->mii.phy_id_mask = 0x1f;
@@ -877,11 +876,8 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
goto err_out;
}
- printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, "
- "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", dev->name,
- ep->irq, data->dev_addr[0], data->dev_addr[1],
- data->dev_addr[2], data->dev_addr[3],
- data->dev_addr[4], data->dev_addr[5]);
+ printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, %pM\n",
+ dev->name, ep->irq, dev->dev_addr);
return 0;
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index c5721cb38265..cc9ed8643910 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -663,7 +663,7 @@ static int lance_open( struct net_device *dev )
while (--i > 0)
if (DREG & CSR0_IDON)
break;
- if (i < 0 || (DREG & CSR0_ERR)) {
+ if (i <= 0 || (DREG & CSR0_ERR)) {
DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
dev->name, i, DREG ));
DREG = CSR0_STOP;
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 2f4be59b9c0b..d98095df05be 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -35,7 +35,7 @@ char atl1c_driver_version[] = ATL1C_DRV_VERSION;
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id atl1c_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)},
/* required last entry */
@@ -2596,11 +2596,8 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
if (netif_msg_probe(adapter))
- dev_dbg(&pdev->dev,
- "mac address : %02x-%02x-%02x-%02x-%02x-%02x\n",
- adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
- adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
- adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
+ dev_dbg(&pdev->dev, "mac address : %pM\n",
+ adapter->hw.mac_addr);
atl1c_hw_set_mac_addr(&adapter->hw);
INIT_WORK(&adapter->common_task, atl1c_common_task);
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 08f8c0969e9b..d59f8e89c65d 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -35,7 +35,7 @@ char atl1e_driver_version[] = DRV_VERSION;
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id atl1e_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(atl1e_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)},
/* required last entry */
@@ -2378,10 +2378,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
- dev_dbg(&pdev->dev, "mac address : %02x-%02x-%02x-%02x-%02x-%02x\n",
- adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
- adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
- adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
+ dev_dbg(&pdev->dev, "mac address : %pM\n", adapter->hw.mac_addr);
INIT_WORK(&adapter->reset_task, atl1e_reset_task);
INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index b6cf3263127c..9ba547069db3 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -232,7 +232,7 @@ static void __devinit atl1_check_options(struct atl1_adapter *adapter)
/*
* atl1_pci_tbl - PCI Device ID Table
*/
-static const struct pci_device_id atl1_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(atl1_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
/* required last entry */
{0,}
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index c0451d75cdcf..40cf9e5cb9e2 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -63,7 +63,7 @@ MODULE_VERSION(ATL2_DRV_VERSION);
/*
* atl2_pci_tbl - PCI Device ID Table
*/
-static struct pci_device_id atl2_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(atl2_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)},
/* required last entry */
{0,}
@@ -1959,12 +1959,15 @@ static int atl2_get_eeprom(struct net_device *netdev,
return -ENOMEM;
for (i = first_dword; i < last_dword; i++) {
- if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword])))
- return -EIO;
+ if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) {
+ ret_val = -EIO;
+ goto free;
+ }
}
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3),
eeprom->len);
+free:
kfree(eeprom_buff);
return ret_val;
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 62d9c9cc5671..1dd4403247ca 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -921,7 +921,7 @@ static int ax_probe(struct platform_device *pdev)
size = (res->end - res->start) + 1;
ax->mem2 = request_mem_region(res->start, size, pdev->name);
- if (ax->mem == NULL) {
+ if (ax->mem2 == NULL) {
dev_err(&pdev->dev, "cannot reserve registers\n");
ret = -ENXIO;
goto exit_mem1;
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 4869adb69586..44b66be38134 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -102,7 +102,7 @@ MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
#ifdef CONFIG_B44_PCI
-static const struct pci_device_id b44_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 9e56014d27ed..5bc74590c73e 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -275,8 +275,14 @@ struct be_adapter {
u32 tx_fc; /* Tx flow control */
int link_speed;
u8 port_type;
+ u8 transceiver;
+ u8 generation; /* BladeEngine ASIC generation */
};
+/* BladeEngine Generation numbers */
+#define BE_GEN2 2
+#define BE_GEN3 3
+
extern const struct ethtool_ops be_ethtool_ops;
#define drvr_stats(adapter) (&adapter->stats.drvr_stats)
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 1b68bd98dc0c..3227b11131c4 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -286,7 +286,7 @@ static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
MCC_WRB_SGE_CNT_SHIFT;
wrb->payload_length = payload_len;
wrb->tag0 = opcode;
- be_dws_cpu_to_le(wrb, 20);
+ be_dws_cpu_to_le(wrb, 8);
}
/* Don't touch the hdr after it's prepared */
@@ -1479,6 +1479,41 @@ err:
return status;
}
+int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+ u8 loopback_type, u8 enable)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_lmode *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
+ sizeof(*req));
+
+ req->src_port = port_num;
+ req->dest_port = port_num;
+ req->loopback_type = loopback_type;
+ req->loopback_state = enable;
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
@@ -1501,6 +1536,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
+ req->hdr.timeout = 4;
req->pattern = cpu_to_le64(pattern);
req->src_port = cpu_to_le32(port_num);
@@ -1571,3 +1607,33 @@ err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
+
+extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_seeprom_read *req;
+ struct be_sge *sge;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ req = nonemb_cmd->va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
+ OPCODE_COMMON_SEEPROM_READ);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
+
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ status = be_mcc_notify_wait(adapter);
+
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 92b87ef156ed..c622a968c379 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -124,6 +124,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_CQ_CREATE 12
#define OPCODE_COMMON_EQ_CREATE 13
#define OPCODE_COMMON_MCC_CREATE 21
+#define OPCODE_COMMON_SEEPROM_READ 30
#define OPCODE_COMMON_NTWK_RX_FILTER 34
#define OPCODE_COMMON_GET_FW_VERSION 35
#define OPCODE_COMMON_SET_FLOW_CONTROL 36
@@ -155,6 +156,7 @@ struct be_mcc_mailbox {
#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17
#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18
+#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19
struct be_cmd_req_hdr {
u8 opcode; /* dword 0 */
@@ -163,7 +165,8 @@ struct be_cmd_req_hdr {
u8 domain; /* dword 0 */
u32 timeout; /* dword 1 */
u32 request_length; /* dword 2 */
- u32 rsvd; /* dword 3 */
+ u8 version; /* dword 3 */
+ u8 rsvd[3]; /* dword 3 */
};
#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
@@ -821,6 +824,19 @@ struct be_cmd_resp_loopback_test {
u32 ticks_compl;
};
+struct be_cmd_req_set_lmode {
+ struct be_cmd_req_hdr hdr;
+ u8 src_port;
+ u8 dest_port;
+ u8 loopback_type;
+ u8 loopback_state;
+};
+
+struct be_cmd_resp_set_lmode {
+ struct be_cmd_resp_hdr resp_hdr;
+ u8 rsvd0[4];
+};
+
/********************** DDR DMA test *********************/
struct be_cmd_req_ddrdma_test {
struct be_cmd_req_hdr hdr;
@@ -840,6 +856,19 @@ struct be_cmd_resp_ddrdma_test {
u8 rcv_buff[4096];
};
+/*********************** SEEPROM Read ***********************/
+
+#define BE_READ_SEEPROM_LEN 1024
+struct be_cmd_req_seeprom_read {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd0[BE_READ_SEEPROM_LEN];
+};
+
+struct be_cmd_resp_seeprom_read {
+ struct be_cmd_req_hdr hdr;
+ u8 seeprom_data[BE_READ_SEEPROM_LEN];
+};
+
extern int be_pci_fnum_get(struct be_adapter *adapter);
extern int be_cmd_POST(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -912,3 +941,8 @@ extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
u32 num_pkts, u64 pattern);
extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
u32 byte_cnt, struct be_dma_mem *cmd);
+extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd);
+extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+ u8 loopback_type, u8 enable);
+
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 298b92cbd689..09d8899b2de9 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -112,12 +112,14 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
"PHY Loopback test",
"External Loopback test",
"DDR DMA test"
+ "Link test"
};
#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
#define BE_MAC_LOOPBACK 0x0
#define BE_PHY_LOOPBACK 0x1
#define BE_ONE_PORT_EXT_LOOPBACK 0x2
+#define BE_NO_LOOPBACK 0xff
static void
be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
@@ -339,28 +341,50 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
status = be_cmd_read_port_type(adapter, adapter->port_num,
&connector);
- switch (connector) {
- case 7:
- ecmd->port = PORT_FIBRE;
- break;
- default:
- ecmd->port = PORT_TP;
- break;
+ if (!status) {
+ switch (connector) {
+ case 7:
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ case 0:
+ ecmd->port = PORT_TP;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ default:
+ ecmd->port = PORT_TP;
+ ecmd->transceiver = XCVR_INTERNAL;
+ break;
+ }
+ } else {
+ ecmd->port = PORT_AUI;
+ ecmd->transceiver = XCVR_INTERNAL;
}
/* Save for future use */
adapter->link_speed = ecmd->speed;
adapter->port_type = ecmd->port;
+ adapter->transceiver = ecmd->transceiver;
} else {
ecmd->speed = adapter->link_speed;
ecmd->port = adapter->port_type;
+ ecmd->transceiver = adapter->transceiver;
}
ecmd->duplex = DUPLEX_FULL;
ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
ecmd->phy_address = adapter->port_num;
- ecmd->transceiver = XCVR_INTERNAL;
+ switch (ecmd->port) {
+ case PORT_FIBRE:
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ break;
+ case PORT_TP:
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
+ break;
+ case PORT_AUI:
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI);
+ break;
+ }
return 0;
}
@@ -489,37 +513,56 @@ err:
return ret;
}
+static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
+ u64 *status)
+{
+ be_cmd_set_loopback(adapter, adapter->port_num,
+ loopback_type, 1);
+ *status = be_cmd_loopback_test(adapter, adapter->port_num,
+ loopback_type, 1500,
+ 2, 0xabc);
+ be_cmd_set_loopback(adapter, adapter->port_num,
+ BE_NO_LOOPBACK, 1);
+ return *status;
+}
+
static void
be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ bool link_up;
+ u8 mac_speed = 0;
+ u16 qos_link_speed = 0;
memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
if (test->flags & ETH_TEST_FL_OFFLINE) {
- data[0] = be_cmd_loopback_test(adapter, adapter->port_num,
- BE_MAC_LOOPBACK, 1500,
- 2, 0xabc);
- if (data[0] != 0)
+ if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
+ &data[0]) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
-
- data[1] = be_cmd_loopback_test(adapter, adapter->port_num,
- BE_PHY_LOOPBACK, 1500,
- 2, 0xabc);
- if (data[1] != 0)
+ }
+ if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
+ &data[1]) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
-
- data[2] = be_cmd_loopback_test(adapter, adapter->port_num,
- BE_ONE_PORT_EXT_LOOPBACK,
- 1500, 2, 0xabc);
- if (data[2] != 0)
+ }
+ if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
+ &data[2]) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
+ }
+ }
- data[3] = be_test_ddr_dma(adapter);
- if (data[3] != 0)
- test->flags |= ETH_TEST_FL_FAILED;
+ if (be_test_ddr_dma(adapter) != 0) {
+ data[3] = 1;
+ test->flags |= ETH_TEST_FL_FAILED;
}
+ if (be_cmd_link_status_query(adapter, &link_up, &mac_speed,
+ &qos_link_speed) != 0) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ data[4] = -1;
+ } else if (mac_speed) {
+ data[4] = 1;
+ }
}
static int
@@ -536,12 +579,57 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
return be_load_fw(adapter, file_name);
}
+static int
+be_get_eeprom_len(struct net_device *netdev)
+{
+ return BE_READ_SEEPROM_LEN;
+}
+
+static int
+be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ uint8_t *data)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_dma_mem eeprom_cmd;
+ struct be_cmd_resp_seeprom_read *resp;
+ int status;
+
+ if (!eeprom->len)
+ return -EINVAL;
+
+ eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
+
+ memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
+ eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
+ eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
+ &eeprom_cmd.dma);
+
+ if (!eeprom_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure. Could not read eeprom\n");
+ return -ENOMEM;
+ }
+
+ status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
+
+ if (!status) {
+ resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
+ memcpy(data, resp->seeprom_data, eeprom->len);
+ }
+ pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
+ eeprom_cmd.dma);
+
+ return status;
+}
+
const struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
.get_wol = be_get_wol,
.set_wol = be_set_wol,
.get_link = ethtool_op_get_link,
+ .get_eeprom_len = be_get_eeprom_len,
+ .get_eeprom = be_read_eeprom,
.get_coalesce = be_get_coalesce,
.set_coalesce = be_set_coalesce,
.get_ringparam = be_get_ringparam,
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 3a1f7902c16d..626b76c0ebc7 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -910,7 +910,7 @@ static inline struct page *be_alloc_pages(u32 size)
static void be_post_rx_frags(struct be_adapter *adapter)
{
struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
- struct be_rx_page_info *page_info = NULL;
+ struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
struct be_queue_info *rxq = &adapter->rx_obj.q;
struct page *pagep = NULL;
struct be_eth_rx_d *rxd;
@@ -941,7 +941,6 @@ static void be_post_rx_frags(struct be_adapter *adapter)
rxd = queue_head_node(rxq);
rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
- queue_head_inc(rxq);
/* Any space left in the current big page for another frag? */
if ((page_offset + rx_frag_size + rx_frag_size) >
@@ -949,10 +948,13 @@ static void be_post_rx_frags(struct be_adapter *adapter)
pagep = NULL;
page_info->last_page_user = true;
}
+
+ prev_page_info = page_info;
+ queue_head_inc(rxq);
page_info = &page_info_tbl[rxq->head];
}
if (pagep)
- page_info->last_page_user = true;
+ prev_page_info->last_page_user = true;
if (posted) {
atomic_add(posted, &rxq->used);
@@ -1348,7 +1350,7 @@ static irqreturn_t be_intx(int irq, void *dev)
int isr;
isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
- be_pci_func(adapter) * CEV_ISR_SIZE);
+ (adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE);
if (!isr)
return IRQ_NONE;
@@ -2049,6 +2051,7 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
static int be_map_pci_bars(struct be_adapter *adapter)
{
u8 __iomem *addr;
+ int pcicfg_reg;
addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
pci_resource_len(adapter->pdev, 2));
@@ -2062,8 +2065,13 @@ static int be_map_pci_bars(struct be_adapter *adapter)
goto pci_map_err;
adapter->db = addr;
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1),
- pci_resource_len(adapter->pdev, 1));
+ if (adapter->generation == BE_GEN2)
+ pcicfg_reg = 1;
+ else
+ pcicfg_reg = 0;
+
+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
+ pci_resource_len(adapter->pdev, pcicfg_reg));
if (addr == NULL)
goto pci_map_err;
adapter->pcicfg = addr;
@@ -2160,6 +2168,7 @@ static int be_stats_init(struct be_adapter *adapter)
cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
if (cmd->va == NULL)
return -1;
+ memset(cmd->va, 0, cmd->size);
return 0;
}
@@ -2238,6 +2247,20 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto rel_reg;
}
adapter = netdev_priv(netdev);
+
+ switch (pdev->device) {
+ case BE_DEVICE_ID1:
+ case OC_DEVICE_ID1:
+ adapter->generation = BE_GEN2;
+ break;
+ case BE_DEVICE_ID2:
+ case OC_DEVICE_ID2:
+ adapter->generation = BE_GEN3;
+ break;
+ default:
+ adapter->generation = 0;
+ }
+
adapter->pdev = pdev;
pci_set_drvdata(pdev, adapter);
adapter->netdev = netdev;
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 8ffea3990d07..0b23bc4f56c6 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -33,6 +33,7 @@
#include <asm/dma.h>
#include <linux/dma-mapping.h>
+#include <asm/dpmc.h>
#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/portmux.h>
@@ -386,8 +387,8 @@ static int mii_probe(struct net_device *dev)
u32 sclk, mdc_div;
/* Enable PHY output early */
- if (!(bfin_read_VR_CTL() & PHYCLKOE))
- bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE);
+ if (!(bfin_read_VR_CTL() & CLKBUFOE))
+ bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);
sclk = get_sclk();
mdc_div = ((sclk / MDC_CLK) / 2) - 1;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 65df1de447e4..5917b941aca2 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -48,7 +48,6 @@
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
-#include <linux/list.h>
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
@@ -3579,14 +3578,14 @@ bnx2_set_rx_mode(struct net_device *dev)
sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
}
- if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) {
+ if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
BNX2_RPM_SORT_USER0_PROM_VLAN;
} else if (!(dev->flags & IFF_PROMISC)) {
/* Add all entries into to the match filter list */
i = 0;
- list_for_each_entry(ha, &dev->uc.list, list) {
+ netdev_for_each_uc_addr(ha, dev) {
bnx2_set_mac_addr(bp, ha->addr,
i + BNX2_START_UNICAST_ADDRESS_INDEX);
sort_mode |= (1 <<
@@ -6145,6 +6144,10 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
+ /* Need to flush the previous three writes to ensure MSI-X
+ * is setup properly */
+ REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
+
for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
msix_ent[i].entry = i;
msix_ent[i].vector = 0;
@@ -6227,6 +6230,8 @@ bnx2_open(struct net_device *dev)
atomic_set(&bp->intr_sem, 0);
+ memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
+
bnx2_enable_int(bp);
if (bp->flags & BNX2_FLAG_USING_MSI) {
@@ -6538,92 +6543,121 @@ bnx2_close(struct net_device *dev)
return 0;
}
-#define GET_NET_STATS64(ctr) \
+static void
+bnx2_save_stats(struct bnx2 *bp)
+{
+ u32 *hw_stats = (u32 *) bp->stats_blk;
+ u32 *temp_stats = (u32 *) bp->temp_stats_blk;
+ int i;
+
+ /* The 1st 10 counters are 64-bit counters */
+ for (i = 0; i < 20; i += 2) {
+ u32 hi;
+ u64 lo;
+
+ hi = *(temp_stats + i) + *(hw_stats + i);
+ lo = *(temp_stats + i + 1) + *(hw_stats + i + 1);
+ if (lo > 0xffffffff)
+ hi++;
+ *(temp_stats + i) = hi;
+ *(temp_stats + i + 1) = lo & 0xffffffff;
+ }
+
+ for ( ; i < sizeof(struct statistics_block) / 4; i++)
+ *(temp_stats + i) = *(temp_stats + i) + *(hw_stats + i);
+}
+
+#define GET_64BIT_NET_STATS64(ctr) \
(unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
(unsigned long) (ctr##_lo)
-#define GET_NET_STATS32(ctr) \
+#define GET_64BIT_NET_STATS32(ctr) \
(ctr##_lo)
#if (BITS_PER_LONG == 64)
-#define GET_NET_STATS GET_NET_STATS64
+#define GET_64BIT_NET_STATS(ctr) \
+ GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
+ GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
#else
-#define GET_NET_STATS GET_NET_STATS32
+#define GET_64BIT_NET_STATS(ctr) \
+ GET_64BIT_NET_STATS32(bp->stats_blk->ctr) + \
+ GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
#endif
+#define GET_32BIT_NET_STATS(ctr) \
+ (unsigned long) (bp->stats_blk->ctr + \
+ bp->temp_stats_blk->ctr)
+
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
- struct statistics_block *stats_blk = bp->stats_blk;
struct net_device_stats *net_stats = &dev->stats;
if (bp->stats_blk == NULL) {
return net_stats;
}
net_stats->rx_packets =
- GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
- GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
- GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
+ GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
+ GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
+ GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
net_stats->tx_packets =
- GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
- GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
- GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
+ GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
+ GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
+ GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
net_stats->rx_bytes =
- GET_NET_STATS(stats_blk->stat_IfHCInOctets);
+ GET_64BIT_NET_STATS(stat_IfHCInOctets);
net_stats->tx_bytes =
- GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
+ GET_64BIT_NET_STATS(stat_IfHCOutOctets);
net_stats->multicast =
- GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
+ GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);
net_stats->collisions =
- (unsigned long) stats_blk->stat_EtherStatsCollisions;
+ GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
net_stats->rx_length_errors =
- (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
- stats_blk->stat_EtherStatsOverrsizePkts);
+ GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
+ GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
net_stats->rx_over_errors =
- (unsigned long) (stats_blk->stat_IfInFTQDiscards +
- stats_blk->stat_IfInMBUFDiscards);
+ GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
+ GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
net_stats->rx_frame_errors =
- (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
+ GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
net_stats->rx_crc_errors =
- (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
+ GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
net_stats->rx_errors = net_stats->rx_length_errors +
net_stats->rx_over_errors + net_stats->rx_frame_errors +
net_stats->rx_crc_errors;
net_stats->tx_aborted_errors =
- (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
- stats_blk->stat_Dot3StatsLateCollisions);
+ GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
+ GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
(CHIP_ID(bp) == CHIP_ID_5708_A0))
net_stats->tx_carrier_errors = 0;
else {
net_stats->tx_carrier_errors =
- (unsigned long)
- stats_blk->stat_Dot3StatsCarrierSenseErrors;
+ GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
}
net_stats->tx_errors =
- (unsigned long)
- stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
- +
+ GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
net_stats->tx_aborted_errors +
net_stats->tx_carrier_errors;
net_stats->rx_missed_errors =
- (unsigned long) (stats_blk->stat_IfInFTQDiscards +
- stats_blk->stat_IfInMBUFDiscards + stats_blk->stat_FwRxDrop);
+ GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
+ GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
+ GET_32BIT_NET_STATS(stat_FwRxDrop);
return net_stats;
}
@@ -7083,6 +7117,9 @@ static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
if (netif_running(bp->dev)) {
+ /* Reset will erase chipset stats; save them */
+ bnx2_save_stats(bp);
+
bnx2_netif_stop(bp);
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
bnx2_free_skbs(bp);
@@ -7427,6 +7464,7 @@ bnx2_get_ethtool_stats(struct net_device *dev,
struct bnx2 *bp = netdev_priv(dev);
int i;
u32 *hw_stats = (u32 *) bp->stats_blk;
+ u32 *temp_stats = (u32 *) bp->temp_stats_blk;
u8 *stats_len_arr = NULL;
if (hw_stats == NULL) {
@@ -7443,21 +7481,26 @@ bnx2_get_ethtool_stats(struct net_device *dev,
stats_len_arr = bnx2_5708_stats_len_arr;
for (i = 0; i < BNX2_NUM_STATS; i++) {
+ unsigned long offset;
+
if (stats_len_arr[i] == 0) {
/* skip this counter */
buf[i] = 0;
continue;
}
+
+ offset = bnx2_stats_offset_arr[i];
if (stats_len_arr[i] == 4) {
/* 4-byte counter */
- buf[i] = (u64)
- *(hw_stats + bnx2_stats_offset_arr[i]);
+ buf[i] = (u64) *(hw_stats + offset) +
+ *(temp_stats + offset);
continue;
}
/* 8-byte counter */
- buf[i] = (((u64) *(hw_stats +
- bnx2_stats_offset_arr[i])) << 32) +
- *(hw_stats + bnx2_stats_offset_arr[i] + 1);
+ buf[i] = (((u64) *(hw_stats + offset)) << 32) +
+ *(hw_stats + offset + 1) +
+ (((u64) *(temp_stats + offset)) << 32) +
+ *(temp_stats + offset + 1);
}
}
@@ -7625,7 +7668,7 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
}
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
static void
poll_bnx2(struct net_device *dev)
{
@@ -7825,6 +7868,14 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->flags = 0;
bp->phy_flags = 0;
+ bp->temp_stats_blk =
+ kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
+
+ if (bp->temp_stats_blk == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
/* enable device (incl. PCI PM wakeup), and bus-mastering */
rc = pci_enable_device(pdev);
if (rc) {
@@ -8229,7 +8280,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
#ifdef BCM_VLAN
.ndo_vlan_rx_register = bnx2_vlan_rx_register,
#endif
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = poll_bnx2,
#endif
};
@@ -8346,6 +8397,8 @@ bnx2_remove_one(struct pci_dev *pdev)
if (bp->regview)
iounmap(bp->regview);
+ kfree(bp->temp_stats_blk);
+
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
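Note on the bnx2 hunks above: the new temp_stats_blk lets counters survive a chip reset. Before the reset, bnx2_save_stats() folds the live hardware block into the temporary block, treating the first ten counters as hi/lo 32-bit pairs and propagating the carry from the low word by hand; both blocks are then summed whenever statistics are reported. A self-contained sketch of that accumulation (the 64-bit cast is mine, to keep the intermediate sum wide; this illustrates the idea, not the driver's exact arithmetic):

#include <stdint.h>
#include <stdio.h>

/* Fold hw[] into acc[]: pairs of u32 words (hi, lo) forming 64-bit counters. */
static void save_stats64(uint32_t *acc, const uint32_t *hw, int pairs)
{
	int i;

	for (i = 0; i < pairs * 2; i += 2) {
		uint32_t hi = acc[i] + hw[i];
		uint64_t lo = (uint64_t)acc[i + 1] + hw[i + 1];

		if (lo > 0xffffffffULL)		/* carry from the low word */
			hi++;
		acc[i] = hi;
		acc[i + 1] = (uint32_t)lo;
	}
}

int main(void)
{
	uint32_t acc[2] = { 0, 0xfffffff0 };	/* saved: hi = 0, lo near wrap */
	uint32_t hw[2]  = { 0, 0x30 };		/* fresh hardware counters */

	save_stats64(acc, hw, 1);
	/* prints hi=1 lo=0x00000020 */
	printf("hi=%u lo=0x%08x\n", (unsigned)acc[0], (unsigned)acc[1]);
	return 0;
}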
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 939dc44d50a0..b860fbbff355 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6851,6 +6851,7 @@ struct bnx2 {
dma_addr_t status_blk_mapping;
struct statistics_block *stats_blk;
+ struct statistics_block *temp_stats_blk;
dma_addr_t stats_blk_mapping;
int ctx_pages;
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 77ba13520d87..ffc7381969ae 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -140,7 +140,7 @@ static struct {
};
-static const struct pci_device_id bnx2x_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
@@ -7593,6 +7593,8 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
bnx2x_set_iscsi_eth_mac_addr(bp, 1);
bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
+ bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
+ CNIC_SB_ID(bp));
}
mutex_unlock(&bp->cnic_mutex);
#endif
@@ -11729,7 +11731,7 @@ static void bnx2x_vlan_rx_register(struct net_device *dev,
#endif
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -11753,7 +11755,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
#ifdef BCM_VLAN
.ndo_vlan_rx_register = bnx2x_vlan_rx_register,
#endif
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = poll_bnx2x,
#endif
};
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 0fb7a4964e75..822f586d72af 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1580,7 +1580,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
// check if any partner replys
if (best->is_individual) {
pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
- best->slave->dev->master->name);
+ best->slave ? best->slave->dev->master->name : "NULL");
}
best->is_active = 1;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6a42a1453afa..1787e3c86573 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3307,7 +3307,7 @@ static void bond_remove_proc_entry(struct bonding *bond)
/* Create the bonding directory under /proc/net, if doesn't exist yet.
* Caller must hold rtnl_lock.
*/
-static void bond_create_proc_dir(struct bond_net *bn)
+static void __net_init bond_create_proc_dir(struct bond_net *bn)
{
if (!bn->proc_dir) {
bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
@@ -3320,7 +3320,7 @@ static void bond_create_proc_dir(struct bond_net *bn)
/* Destroy the bonding directory under /proc/net, if empty.
* Caller must hold rtnl_lock.
*/
-static void bond_destroy_proc_dir(struct bond_net *bn)
+static void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
{
if (bn->proc_dir) {
remove_proc_entry(DRV_NAME, bn->net->proc_net);
@@ -3338,11 +3338,11 @@ static void bond_remove_proc_entry(struct bonding *bond)
{
}
-static void bond_create_proc_dir(struct bond_net *bn)
+static inline void bond_create_proc_dir(struct bond_net *bn)
{
}
-static void bond_destroy_proc_dir(struct bond_net *bn)
+static inline void bond_destroy_proc_dir(struct bond_net *bn)
{
}
@@ -3650,7 +3650,7 @@ static int bond_open(struct net_device *bond_dev)
*/
if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) {
/* something went wrong - fail the open operation */
- return -1;
+ return -ENOMEM;
}
INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
@@ -3742,7 +3742,7 @@ static int bond_close(struct net_device *bond_dev)
static struct net_device_stats *bond_get_stats(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct net_device_stats *stats = &bond->stats;
+ struct net_device_stats *stats = &bond_dev->stats;
struct net_device_stats local_stats;
struct slave *slave;
int i;
@@ -4955,7 +4955,7 @@ out_netdev:
goto out;
}
-static int bond_net_init(struct net *net)
+static int __net_init bond_net_init(struct net *net)
{
struct bond_net *bn = net_generic(net, bond_net_id);
@@ -4967,7 +4967,7 @@ static int bond_net_init(struct net *net)
return 0;
}
-static void bond_net_exit(struct net *net)
+static void __net_exit bond_net_exit(struct net *net)
{
struct bond_net *bn = net_generic(net, bond_net_id);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 558ec1352527..257a7a4dfce9 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -197,7 +197,6 @@ struct bonding {
s8 send_grat_arp;
s8 send_unsol_na;
s8 setup_by_slave;
- struct net_device_stats stats;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_entry;
char proc_file_name[IFNAMSIZ];
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 166cc7e579c0..a2f29a38798a 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -342,6 +342,9 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int mb, prio;
u32 reg_mid, reg_mcr;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
mb = get_tx_next_mb(priv);
prio = get_tx_next_prio(priv);
@@ -1070,6 +1073,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
priv->can.bittiming_const = &at91_bittiming_const;
priv->can.do_set_bittiming = at91_set_bittiming;
priv->can.do_set_mode = at91_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
priv->reg_base = addr;
priv->dev = dev;
priv->clk = clk;
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 0ec1524523cc..bf7f9ba2d903 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -318,6 +318,9 @@ static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
u16 val;
int i;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
netif_stop_queue(dev);
/* fill id */
@@ -600,6 +603,7 @@ struct net_device *alloc_bfin_candev(void)
priv->can.bittiming_const = &bfin_can_bittiming_const;
priv->can.do_set_bittiming = bfin_can_set_bittiming;
priv->can.do_set_mode = bfin_can_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
return dev;
}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index c1bb29f0322b..f08f1202ff00 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -592,6 +592,8 @@ static int can_changelink(struct net_device *dev,
if (dev->flags & IFF_UP)
return -EBUSY;
cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ if (cm->flags & ~priv->ctrlmode_supported)
+ return -EOPNOTSUPP;
priv->ctrlmode &= ~cm->mask;
priv->ctrlmode |= cm->flags;
}
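Note on the can_changelink() hunk above: it rejects netlink requests that ask for a controller mode the driver never advertised. Every requested flag must be covered by the driver's ctrlmode_supported mask, which the at91, bfin, mcp251x and mscan drivers in this series now fill in. The test is a plain requested-and-not-supported bitmask check; a tiny sketch with made-up flag values:

#include <errno.h>
#include <stdio.h>

#define MODE_LOOPBACK	0x01	/* illustrative values only */
#define MODE_LISTENONLY	0x02
#define MODE_3_SAMPLES	0x04

static int check_ctrlmode(unsigned int requested, unsigned int supported)
{
	if (requested & ~supported)
		return -EOPNOTSUPP;	/* at least one flag is not offered */
	return 0;
}

int main(void)
{
	unsigned int supported = MODE_LOOPBACK | MODE_3_SAMPLES;

	printf("%d\n", check_ctrlmode(MODE_LOOPBACK, supported));	/* 0 */
	printf("%d\n", check_ctrlmode(MODE_LISTENONLY, supported));	/* -EOPNOTSUPP */
	return 0;
}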
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 9c5a1537939c..f8cc168ec76c 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -180,6 +180,14 @@
#define RXBEID0_OFF 4
#define RXBDLC_OFF 5
#define RXBDAT_OFF 6
+#define RXFSIDH(n) ((n) * 4)
+#define RXFSIDL(n) ((n) * 4 + 1)
+#define RXFEID8(n) ((n) * 4 + 2)
+#define RXFEID0(n) ((n) * 4 + 3)
+#define RXMSIDH(n) ((n) * 4 + 0x20)
+#define RXMSIDL(n) ((n) * 4 + 0x21)
+#define RXMEID8(n) ((n) * 4 + 0x22)
+#define RXMEID0(n) ((n) * 4 + 0x23)
#define GET_BYTE(val, byte) \
(((val) >> ((byte) * 8)) & 0xff)
@@ -219,7 +227,8 @@ struct mcp251x_priv {
struct net_device *net;
struct spi_device *spi;
- struct mutex spi_lock; /* SPI buffer lock */
+ struct mutex mcp_lock; /* SPI device lock */
+
u8 *spi_tx_buf;
u8 *spi_rx_buf;
dma_addr_t spi_tx_dma;
@@ -227,11 +236,11 @@ struct mcp251x_priv {
struct sk_buff *tx_skb;
int tx_len;
+
struct workqueue_struct *wq;
struct work_struct tx_work;
- struct work_struct irq_work;
- struct completion awake;
- int wake;
+ struct work_struct restart_work;
+
int force_quit;
int after_suspend;
#define AFTER_SUSPEND_UP 1
@@ -245,7 +254,8 @@ static void mcp251x_clean(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
- net->stats.tx_errors++;
+ if (priv->tx_skb || priv->tx_len)
+ net->stats.tx_errors++;
if (priv->tx_skb)
dev_kfree_skb(priv->tx_skb);
if (priv->tx_len)
@@ -300,16 +310,12 @@ static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
u8 val = 0;
- mutex_lock(&priv->spi_lock);
-
priv->spi_tx_buf[0] = INSTRUCTION_READ;
priv->spi_tx_buf[1] = reg;
mcp251x_spi_trans(spi, 3);
val = priv->spi_rx_buf[2];
- mutex_unlock(&priv->spi_lock);
-
return val;
}
@@ -317,15 +323,11 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
{
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
- mutex_lock(&priv->spi_lock);
-
priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
priv->spi_tx_buf[1] = reg;
priv->spi_tx_buf[2] = val;
mcp251x_spi_trans(spi, 3);
-
- mutex_unlock(&priv->spi_lock);
}
static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
@@ -333,16 +335,12 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
{
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
- mutex_lock(&priv->spi_lock);
-
priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY;
priv->spi_tx_buf[1] = reg;
priv->spi_tx_buf[2] = mask;
priv->spi_tx_buf[3] = val;
mcp251x_spi_trans(spi, 4);
-
- mutex_unlock(&priv->spi_lock);
}
static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
@@ -358,10 +356,8 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx) + i,
buf[i]);
} else {
- mutex_lock(&priv->spi_lock);
memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
mcp251x_spi_trans(spi, TXBDAT_OFF + len);
- mutex_unlock(&priv->spi_lock);
}
}
@@ -408,13 +404,9 @@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
for (; i < (RXBDAT_OFF + len); i++)
buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
} else {
- mutex_lock(&priv->spi_lock);
-
priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx);
mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN);
memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN);
-
- mutex_unlock(&priv->spi_lock);
}
}
@@ -467,21 +459,6 @@ static void mcp251x_hw_sleep(struct spi_device *spi)
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP);
}
-static void mcp251x_hw_wakeup(struct spi_device *spi)
-{
- struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
-
- priv->wake = 1;
-
- /* Can only wake up by generating a wake-up interrupt. */
- mcp251x_write_bits(spi, CANINTE, CANINTE_WAKIE, CANINTE_WAKIE);
- mcp251x_write_bits(spi, CANINTF, CANINTF_WAKIF, CANINTF_WAKIF);
-
- /* Wait until the device is awake */
- if (!wait_for_completion_timeout(&priv->awake, HZ))
- dev_err(&spi->dev, "MCP251x didn't wake-up\n");
-}
-
static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
struct net_device *net)
{
@@ -490,16 +467,11 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
if (priv->tx_skb || priv->tx_len) {
dev_warn(&spi->dev, "hard_xmit called while tx busy\n");
- netif_stop_queue(net);
return NETDEV_TX_BUSY;
}
- if (skb->len != sizeof(struct can_frame)) {
- dev_err(&spi->dev, "dropping packet - bad length\n");
- dev_kfree_skb(skb);
- net->stats.tx_dropped++;
+ if (can_dropped_invalid_skb(net, skb))
return NETDEV_TX_OK;
- }
netif_stop_queue(net);
priv->tx_skb = skb;
@@ -515,12 +487,13 @@ static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
switch (mode) {
case CAN_MODE_START:
+ mcp251x_clean(net);
/* We have to delay work since SPI I/O may sleep */
priv->can.state = CAN_STATE_ERROR_ACTIVE;
priv->restart_tx = 1;
if (priv->can.restart_ms == 0)
priv->after_suspend = AFTER_SUSPEND_RESTART;
- queue_work(priv->wq, &priv->irq_work);
+ queue_work(priv->wq, &priv->restart_work);
break;
default:
return -EOPNOTSUPP;
@@ -529,7 +502,7 @@ static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
return 0;
}
-static void mcp251x_set_normal_mode(struct spi_device *spi)
+static int mcp251x_set_normal_mode(struct spi_device *spi)
{
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
unsigned long timeout;
@@ -537,12 +510,14 @@ static void mcp251x_set_normal_mode(struct spi_device *spi)
/* Enable interrupts */
mcp251x_write_reg(spi, CANINTE,
CANINTE_ERRIE | CANINTE_TX2IE | CANINTE_TX1IE |
- CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE |
- CANINTF_MERRF);
+ CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE);
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
/* Put device into loopback mode */
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK);
+ } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ /* Put device into listen-only mode */
+ mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LISTEN_ONLY);
} else {
/* Put device into normal mode */
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL);
@@ -554,11 +529,12 @@ static void mcp251x_set_normal_mode(struct spi_device *spi)
if (time_after(jiffies, timeout)) {
dev_err(&spi->dev, "MCP251x didn't"
" enter in normal mode\n");
- return;
+ return -EBUSY;
}
}
}
priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ return 0;
}
static int mcp251x_do_set_bittiming(struct net_device *net)
@@ -589,33 +565,39 @@ static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
{
mcp251x_do_set_bittiming(net);
- /* Enable RX0->RX1 buffer roll over and disable filters */
- mcp251x_write_bits(spi, RXBCTRL(0),
- RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1,
- RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1);
- mcp251x_write_bits(spi, RXBCTRL(1),
- RXBCTRL_RXM0 | RXBCTRL_RXM1,
- RXBCTRL_RXM0 | RXBCTRL_RXM1);
+ mcp251x_write_reg(spi, RXBCTRL(0),
+ RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1);
+ mcp251x_write_reg(spi, RXBCTRL(1),
+ RXBCTRL_RXM0 | RXBCTRL_RXM1);
return 0;
}
-static void mcp251x_hw_reset(struct spi_device *spi)
+static int mcp251x_hw_reset(struct spi_device *spi)
{
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
int ret;
-
- mutex_lock(&priv->spi_lock);
+ unsigned long timeout;
priv->spi_tx_buf[0] = INSTRUCTION_RESET;
-
ret = spi_write(spi, priv->spi_tx_buf, 1);
-
- mutex_unlock(&priv->spi_lock);
-
- if (ret)
+ if (ret) {
dev_err(&spi->dev, "reset failed: ret = %d\n", ret);
+ return -EIO;
+ }
+
/* Wait for reset to finish */
+ timeout = jiffies + HZ;
mdelay(10);
+ while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK)
+ != CANCTRL_REQOP_CONF) {
+ schedule();
+ if (time_after(jiffies, timeout)) {
+ dev_err(&spi->dev, "MCP251x didn't"
+ " enter in conf mode after reset\n");
+ return -EBUSY;
+ }
+ }
+ return 0;
}
static int mcp251x_hw_probe(struct spi_device *spi)
@@ -639,63 +621,17 @@ static int mcp251x_hw_probe(struct spi_device *spi)
return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
}
-static irqreturn_t mcp251x_can_isr(int irq, void *dev_id)
-{
- struct net_device *net = (struct net_device *)dev_id;
- struct mcp251x_priv *priv = netdev_priv(net);
-
- /* Schedule bottom half */
- if (!work_pending(&priv->irq_work))
- queue_work(priv->wq, &priv->irq_work);
-
- return IRQ_HANDLED;
-}
-
-static int mcp251x_open(struct net_device *net)
+static void mcp251x_open_clean(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
struct mcp251x_platform_data *pdata = spi->dev.platform_data;
- int ret;
-
- ret = open_candev(net);
- if (ret) {
- dev_err(&spi->dev, "unable to set initial baudrate!\n");
- return ret;
- }
+ free_irq(spi->irq, priv);
+ mcp251x_hw_sleep(spi);
if (pdata->transceiver_enable)
- pdata->transceiver_enable(1);
-
- priv->force_quit = 0;
- priv->tx_skb = NULL;
- priv->tx_len = 0;
-
- ret = request_irq(spi->irq, mcp251x_can_isr,
- IRQF_TRIGGER_FALLING, DEVICE_NAME, net);
- if (ret) {
- dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
- close_candev(net);
- return ret;
- }
-
- mcp251x_hw_wakeup(spi);
- mcp251x_hw_reset(spi);
- ret = mcp251x_setup(net, priv, spi);
- if (ret) {
- free_irq(spi->irq, net);
- mcp251x_hw_sleep(spi);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
- close_candev(net);
- return ret;
- }
- mcp251x_set_normal_mode(spi);
- netif_wake_queue(net);
-
- return 0;
+ pdata->transceiver_enable(0);
+ close_candev(net);
}
static int mcp251x_stop(struct net_device *net)
@@ -706,17 +642,19 @@ static int mcp251x_stop(struct net_device *net)
close_candev(net);
+ priv->force_quit = 1;
+ free_irq(spi->irq, priv);
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+
+ mutex_lock(&priv->mcp_lock);
+
/* Disable and clear pending interrupts */
mcp251x_write_reg(spi, CANINTE, 0x00);
mcp251x_write_reg(spi, CANINTF, 0x00);
- priv->force_quit = 1;
- free_irq(spi->irq, net);
- flush_workqueue(priv->wq);
-
mcp251x_write_reg(spi, TXBCTRL(0), 0);
- if (priv->tx_skb || priv->tx_len)
- mcp251x_clean(net);
+ mcp251x_clean(net);
mcp251x_hw_sleep(spi);
@@ -725,9 +663,27 @@ static int mcp251x_stop(struct net_device *net)
priv->can.state = CAN_STATE_STOPPED;
+ mutex_unlock(&priv->mcp_lock);
+
return 0;
}
+static void mcp251x_error_skb(struct net_device *net, int can_id, int data1)
+{
+ struct sk_buff *skb;
+ struct can_frame *frame;
+
+ skb = alloc_can_err_skb(net, &frame);
+ if (skb) {
+ frame->can_id = can_id;
+ frame->data[1] = data1;
+ netif_rx(skb);
+ } else {
+ dev_err(&net->dev,
+ "cannot allocate error skb\n");
+ }
+}
+
static void mcp251x_tx_work_handler(struct work_struct *ws)
{
struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
@@ -736,33 +692,32 @@ static void mcp251x_tx_work_handler(struct work_struct *ws)
struct net_device *net = priv->net;
struct can_frame *frame;
+ mutex_lock(&priv->mcp_lock);
if (priv->tx_skb) {
- frame = (struct can_frame *)priv->tx_skb->data;
-
if (priv->can.state == CAN_STATE_BUS_OFF) {
mcp251x_clean(net);
- netif_wake_queue(net);
- return;
+ } else {
+ frame = (struct can_frame *)priv->tx_skb->data;
+
+ if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
+ frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
+ mcp251x_hw_tx(spi, frame, 0);
+ priv->tx_len = 1 + frame->can_dlc;
+ can_put_echo_skb(priv->tx_skb, net, 0);
+ priv->tx_skb = NULL;
}
- if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
- frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
- mcp251x_hw_tx(spi, frame, 0);
- priv->tx_len = 1 + frame->can_dlc;
- can_put_echo_skb(priv->tx_skb, net, 0);
- priv->tx_skb = NULL;
}
+ mutex_unlock(&priv->mcp_lock);
}
-static void mcp251x_irq_work_handler(struct work_struct *ws)
+static void mcp251x_restart_work_handler(struct work_struct *ws)
{
struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
- irq_work);
+ restart_work);
struct spi_device *spi = priv->spi;
struct net_device *net = priv->net;
- u8 txbnctrl;
- u8 intf;
- enum can_state new_state;
+ mutex_lock(&priv->mcp_lock);
if (priv->after_suspend) {
mdelay(10);
mcp251x_hw_reset(spi);
@@ -771,45 +726,54 @@ static void mcp251x_irq_work_handler(struct work_struct *ws)
mcp251x_set_normal_mode(spi);
} else if (priv->after_suspend & AFTER_SUSPEND_UP) {
netif_device_attach(net);
- /* Clean since we lost tx buffer */
- if (priv->tx_skb || priv->tx_len) {
- mcp251x_clean(net);
- netif_wake_queue(net);
- }
+ mcp251x_clean(net);
mcp251x_set_normal_mode(spi);
+ netif_wake_queue(net);
} else {
mcp251x_hw_sleep(spi);
}
priv->after_suspend = 0;
+ priv->force_quit = 0;
}
- if (priv->can.restart_ms == 0 && priv->can.state == CAN_STATE_BUS_OFF)
- return;
+ if (priv->restart_tx) {
+ priv->restart_tx = 0;
+ mcp251x_write_reg(spi, TXBCTRL(0), 0);
+ mcp251x_clean(net);
+ netif_wake_queue(net);
+ mcp251x_error_skb(net, CAN_ERR_RESTARTED, 0);
+ }
+ mutex_unlock(&priv->mcp_lock);
+}
- while (!priv->force_quit && !freezing(current)) {
- u8 eflag = mcp251x_read_reg(spi, EFLG);
- int can_id = 0, data1 = 0;
+static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
+{
+ struct mcp251x_priv *priv = dev_id;
+ struct spi_device *spi = priv->spi;
+ struct net_device *net = priv->net;
- mcp251x_write_reg(spi, EFLG, 0x00);
+ mutex_lock(&priv->mcp_lock);
+ while (!priv->force_quit) {
+ enum can_state new_state;
+ u8 intf = mcp251x_read_reg(spi, CANINTF);
+ u8 eflag;
+ int can_id = 0, data1 = 0;
- if (priv->restart_tx) {
- priv->restart_tx = 0;
- mcp251x_write_reg(spi, TXBCTRL(0), 0);
- if (priv->tx_skb || priv->tx_len)
- mcp251x_clean(net);
- netif_wake_queue(net);
- can_id |= CAN_ERR_RESTARTED;
+ if (intf & CANINTF_RX0IF) {
+ mcp251x_hw_rx(spi, 0);
+ /* Free one buffer ASAP */
+ mcp251x_write_bits(spi, CANINTF, intf & CANINTF_RX0IF,
+ 0x00);
}
- if (priv->wake) {
- /* Wait whilst the device wakes up */
- mdelay(10);
- priv->wake = 0;
- }
+ if (intf & CANINTF_RX1IF)
+ mcp251x_hw_rx(spi, 1);
- intf = mcp251x_read_reg(spi, CANINTF);
mcp251x_write_bits(spi, CANINTF, intf, 0x00);
+ eflag = mcp251x_read_reg(spi, EFLG);
+ mcp251x_write_reg(spi, EFLG, 0x00);
+
/* Update can state */
if (eflag & EFLG_TXBO) {
new_state = CAN_STATE_BUS_OFF;
@@ -850,59 +814,31 @@ static void mcp251x_irq_work_handler(struct work_struct *ws)
}
priv->can.state = new_state;
- if ((intf & CANINTF_ERRIF) || (can_id & CAN_ERR_RESTARTED)) {
- struct sk_buff *skb;
- struct can_frame *frame;
-
- /* Create error frame */
- skb = alloc_can_err_skb(net, &frame);
- if (skb) {
- /* Set error frame flags based on bus state */
- frame->can_id = can_id;
- frame->data[1] = data1;
-
- /* Update net stats for overflows */
- if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
- if (eflag & EFLG_RX0OVR)
- net->stats.rx_over_errors++;
- if (eflag & EFLG_RX1OVR)
- net->stats.rx_over_errors++;
- frame->can_id |= CAN_ERR_CRTL;
- frame->data[1] |=
- CAN_ERR_CRTL_RX_OVERFLOW;
- }
-
- netif_rx(skb);
- } else {
- dev_info(&spi->dev,
- "cannot allocate error skb\n");
+ if (intf & CANINTF_ERRIF) {
+ /* Handle overflow counters */
+ if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
+ if (eflag & EFLG_RX0OVR)
+ net->stats.rx_over_errors++;
+ if (eflag & EFLG_RX1OVR)
+ net->stats.rx_over_errors++;
+ can_id |= CAN_ERR_CRTL;
+ data1 |= CAN_ERR_CRTL_RX_OVERFLOW;
}
+ mcp251x_error_skb(net, can_id, data1);
}
if (priv->can.state == CAN_STATE_BUS_OFF) {
if (priv->can.restart_ms == 0) {
+ priv->force_quit = 1;
can_bus_off(net);
mcp251x_hw_sleep(spi);
- return;
+ break;
}
}
if (intf == 0)
break;
- if (intf & CANINTF_WAKIF)
- complete(&priv->awake);
-
- if (intf & CANINTF_MERRF) {
- /* If there are pending Tx buffers, restart queue */
- txbnctrl = mcp251x_read_reg(spi, TXBCTRL(0));
- if (!(txbnctrl & TXBCTRL_TXREQ)) {
- if (priv->tx_skb || priv->tx_len)
- mcp251x_clean(net);
- netif_wake_queue(net);
- }
- }
-
if (intf & (CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF)) {
net->stats.tx_packets++;
net->stats.tx_bytes += priv->tx_len - 1;
@@ -913,12 +849,66 @@ static void mcp251x_irq_work_handler(struct work_struct *ws)
netif_wake_queue(net);
}
- if (intf & CANINTF_RX0IF)
- mcp251x_hw_rx(spi, 0);
+ }
+ mutex_unlock(&priv->mcp_lock);
+ return IRQ_HANDLED;
+}
- if (intf & CANINTF_RX1IF)
- mcp251x_hw_rx(spi, 1);
+static int mcp251x_open(struct net_device *net)
+{
+ struct mcp251x_priv *priv = netdev_priv(net);
+ struct spi_device *spi = priv->spi;
+ struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+ int ret;
+
+ ret = open_candev(net);
+ if (ret) {
+ dev_err(&spi->dev, "unable to set initial baudrate!\n");
+ return ret;
}
+
+ mutex_lock(&priv->mcp_lock);
+ if (pdata->transceiver_enable)
+ pdata->transceiver_enable(1);
+
+ priv->force_quit = 0;
+ priv->tx_skb = NULL;
+ priv->tx_len = 0;
+
+ ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
+ IRQF_TRIGGER_FALLING, DEVICE_NAME, priv);
+ if (ret) {
+ dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
+ if (pdata->transceiver_enable)
+ pdata->transceiver_enable(0);
+ close_candev(net);
+ goto open_unlock;
+ }
+
+ priv->wq = create_freezeable_workqueue("mcp251x_wq");
+ INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
+ INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
+
+ ret = mcp251x_hw_reset(spi);
+ if (ret) {
+ mcp251x_open_clean(net);
+ goto open_unlock;
+ }
+ ret = mcp251x_setup(net, priv, spi);
+ if (ret) {
+ mcp251x_open_clean(net);
+ goto open_unlock;
+ }
+ ret = mcp251x_set_normal_mode(spi);
+ if (ret) {
+ mcp251x_open_clean(net);
+ goto open_unlock;
+ }
+ netif_wake_queue(net);
+
+open_unlock:
+ mutex_unlock(&priv->mcp_lock);
+ return ret;
}
static const struct net_device_ops mcp251x_netdev_ops = {
@@ -952,11 +942,13 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
priv->can.bittiming_const = &mcp251x_bittiming_const;
priv->can.do_set_mode = mcp251x_do_set_mode;
priv->can.clock.freq = pdata->oscillator_frequency / 2;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
priv->net = net;
dev_set_drvdata(&spi->dev, priv);
priv->spi = spi;
- mutex_init(&priv->spi_lock);
+ mutex_init(&priv->mcp_lock);
/* If requested, allocate DMA buffers */
if (mcp251x_enable_dma) {
@@ -990,7 +982,7 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
goto error_tx_buf;
}
priv->spi_rx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
- if (!priv->spi_tx_buf) {
+ if (!priv->spi_rx_buf) {
ret = -ENOMEM;
goto error_rx_buf;
}
@@ -1005,18 +997,12 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
SET_NETDEV_DEV(net, &spi->dev);
- priv->wq = create_freezeable_workqueue("mcp251x_wq");
-
- INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
- INIT_WORK(&priv->irq_work, mcp251x_irq_work_handler);
-
- init_completion(&priv->awake);
-
/* Configure the SPI bus */
spi->mode = SPI_MODE_0;
spi->bits_per_word = 8;
spi_setup(spi);
+ /* Here is OK to not lock the MCP, no one knows about it yet */
if (!mcp251x_hw_probe(spi)) {
dev_info(&spi->dev, "Probe failed\n");
goto error_probe;
@@ -1059,10 +1045,6 @@ static int __devexit mcp251x_can_remove(struct spi_device *spi)
unregister_candev(net);
free_candev(net);
- priv->force_quit = 1;
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
-
if (mcp251x_enable_dma) {
dma_free_coherent(&spi->dev, PAGE_SIZE,
priv->spi_tx_buf, priv->spi_tx_dma);
@@ -1084,6 +1066,12 @@ static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state)
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
struct net_device *net = priv->net;
+ priv->force_quit = 1;
+ disable_irq(spi->irq);
+ /*
+ * Note: at this point neither IST nor workqueues are running.
+ * open/stop cannot be called anyway so locking is not needed
+ */
if (netif_running(net)) {
netif_device_detach(net);
@@ -1110,16 +1098,18 @@ static int mcp251x_can_resume(struct spi_device *spi)
if (priv->after_suspend & AFTER_SUSPEND_POWER) {
pdata->power_enable(1);
- queue_work(priv->wq, &priv->irq_work);
+ queue_work(priv->wq, &priv->restart_work);
} else {
if (priv->after_suspend & AFTER_SUSPEND_UP) {
if (pdata->transceiver_enable)
pdata->transceiver_enable(1);
- queue_work(priv->wq, &priv->irq_work);
+ queue_work(priv->wq, &priv->restart_work);
} else {
priv->after_suspend = 0;
}
}
+ priv->force_quit = 0;
+ enable_irq(spi->irq);
return 0;
}
#else
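Note on the mcp251x rework above: the irq_work bottom half is replaced by request_threaded_irq(). The threaded handler takes the new mcp_lock mutex and keeps reading CANINTF, servicing receive, transmit and error sources and acknowledging what it saw, until no flag is left pending. A small user-space model of that drain loop (invented flag values and a fake register standing in for the SPI reads):

#include <stdint.h>
#include <stdio.h>

#define INTF_RX0 0x01	/* illustrative flag layout, not the chip's */
#define INTF_RX1 0x02
#define INTF_TX0 0x04

static uint8_t fake_canintf = INTF_RX0 | INTF_TX0;

static uint8_t read_intf(void)		{ return fake_canintf; }
static void clear_intf(uint8_t bits)	{ fake_canintf &= (uint8_t)~bits; }

int main(void)
{
	int force_quit = 0;

	while (!force_quit) {
		uint8_t intf = read_intf();

		if (intf & INTF_RX0)
			printf("receive from buffer 0\n");
		if (intf & INTF_RX1)
			printf("receive from buffer 1\n");
		if (intf & INTF_TX0)
			printf("complete transmit\n");

		clear_intf(intf);	/* acknowledge everything handled */

		if (intf == 0)		/* nothing was pending: drained */
			break;
	}
	return 0;
}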
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index cd0f2d6f375d..27d1d398e25e 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -11,12 +11,13 @@ if CAN_MSCAN
config CAN_MPC5XXX
tristate "Freescale MPC5xxx onboard CAN controller"
- depends on PPC_MPC52xx
+ depends on (PPC_MPC52xx || PPC_MPC512x)
---help---
If you say yes here you get support for Freescale's MPC5xxx
- onboard CAN controller.
+ onboard CAN controller. Currently, the MPC5200, MPC5200B and
+ MPC5121 (Rev. 2 and later) are supported.
- This driver can also be built as a module. If so, the module
+ This driver can also be built as a module. If so, the module
will be called mscan-mpc5xxx.ko.
endif
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 1de6f6349b16..03e7c48465a2 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -29,6 +29,7 @@
#include <linux/can/dev.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
+#include <linux/clk.h>
#include <linux/io.h>
#include <asm/mpc52xx.h>
@@ -36,22 +37,21 @@
#define DRV_NAME "mpc5xxx_can"
-static struct of_device_id mpc52xx_cdm_ids[] __devinitdata = {
+struct mpc5xxx_can_data {
+ unsigned int type;
+ u32 (*get_clock)(struct of_device *ofdev, const char *clock_name,
+ int *mscan_clksrc);
+};
+
+#ifdef CONFIG_PPC_MPC52xx
+static struct of_device_id __devinitdata mpc52xx_cdm_ids[] = {
{ .compatible = "fsl,mpc5200-cdm", },
{}
};
-/*
- * Get frequency of the MSCAN clock source
- *
- * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock (IP_CLK)
- * can be selected. According to the MPC5200 user's manual, the oscillator
- * clock is the better choice as it has less jitter but due to a hardware
- * bug, it can not be selected for the old MPC5200 Rev. A chips.
- */
-
-static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
- int clock_src)
+static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
+ const char *clock_name,
+ int *mscan_clksrc)
{
unsigned int pvr;
struct mpc52xx_cdm __iomem *cdm;
@@ -61,21 +61,33 @@ static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
pvr = mfspr(SPRN_PVR);
- freq = mpc5xxx_get_bus_frequency(of->node);
+ /*
+ * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
+ * (IP_CLK) can be selected as MSCAN clock source. According to
+ * the MPC5200 user's manual, the oscillator clock is the better
+ * choice as it has less jitter. For this reason, it is selected
+ * by default. Unfortunately, it can not be selected for the old
+ * MPC5200 Rev. A chips due to a hardware bug (check errata).
+ */
+ if (clock_name && strcmp(clock_name, "ip") == 0)
+ *mscan_clksrc = MSCAN_CLKSRC_BUS;
+ else
+ *mscan_clksrc = MSCAN_CLKSRC_XTAL;
+
+ freq = mpc5xxx_get_bus_frequency(ofdev->node);
if (!freq)
return 0;
- if (clock_src == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
+ if (*mscan_clksrc == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
return freq;
/* Determine SYS_XTAL_IN frequency from the clock domain settings */
np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids);
if (!np_cdm) {
- dev_err(&of->dev, "can't get clock node!\n");
+ dev_err(&ofdev->dev, "can't get clock node!\n");
return 0;
}
cdm = of_iomap(np_cdm, 0);
- of_node_put(np_cdm);
if (in_8(&cdm->ipb_clk_sel) & 0x1)
freq *= 2;
@@ -84,26 +96,174 @@ static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
freq *= (val & (1 << 5)) ? 8 : 4;
freq /= (val & (1 << 6)) ? 12 : 16;
+ of_node_put(np_cdm);
iounmap(cdm);
return freq;
}
+#else /* !CONFIG_PPC_MPC52xx */
+static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
+ const char *clock_name,
+ int *mscan_clksrc)
+{
+ return 0;
+}
+#endif /* CONFIG_PPC_MPC52xx */
+
+#ifdef CONFIG_PPC_MPC512x
+struct mpc512x_clockctl {
+ u32 spmr; /* System PLL Mode Reg */
+ u32 sccr[2]; /* System Clk Ctrl Reg 1 & 2 */
+ u32 scfr1; /* System Clk Freq Reg 1 */
+ u32 scfr2; /* System Clk Freq Reg 2 */
+ u32 reserved;
+ u32 bcr; /* Bread Crumb Reg */
+ u32 pccr[12]; /* PSC Clk Ctrl Reg 0-11 */
+ u32 spccr; /* SPDIF Clk Ctrl Reg */
+ u32 cccr; /* CFM Clk Ctrl Reg */
+ u32 dccr; /* DIU Clk Cnfg Reg */
+ u32 mccr[4]; /* MSCAN Clk Ctrl Reg 1-3 */
+};
+
+static struct of_device_id __devinitdata mpc512x_clock_ids[] = {
+ { .compatible = "fsl,mpc5121-clock", },
+ {}
+};
+
+static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
+ const char *clock_name,
+ int *mscan_clksrc)
+{
+ struct mpc512x_clockctl __iomem *clockctl;
+ struct device_node *np_clock;
+ struct clk *sys_clk, *ref_clk;
+ int plen, clockidx, clocksrc = -1;
+ u32 sys_freq, val, clockdiv = 1, freq = 0;
+ const u32 *pval;
+
+ np_clock = of_find_matching_node(NULL, mpc512x_clock_ids);
+ if (!np_clock) {
+ dev_err(&ofdev->dev, "couldn't find clock node\n");
+ return -ENODEV;
+ }
+ clockctl = of_iomap(np_clock, 0);
+ if (!clockctl) {
+ dev_err(&ofdev->dev, "couldn't map clock registers\n");
+ return 0;
+ }
+
+ /* Determine the MSCAN device index from the physical address */
+ pval = of_get_property(ofdev->node, "reg", &plen);
+ BUG_ON(!pval || plen < sizeof(*pval));
+ clockidx = (*pval & 0x80) ? 1 : 0;
+ if (*pval & 0x2000)
+ clockidx += 2;
+
+ /*
+ * Clock source and divider selection: 3 different clock sources
+ * can be selected: "ip", "ref" or "sys". For the latter two, a
+ * clock divider can be defined as well. If the clock source is
+ * not specified by the device tree, we first try to find an
+ * optimal CAN source clock based on the system clock. If that
+ * is not possible, the reference clock will be used.
+ */
+ if (clock_name && !strcmp(clock_name, "ip")) {
+ *mscan_clksrc = MSCAN_CLKSRC_IPS;
+ freq = mpc5xxx_get_bus_frequency(ofdev->node);
+ } else {
+ *mscan_clksrc = MSCAN_CLKSRC_BUS;
+
+ pval = of_get_property(ofdev->node,
+ "fsl,mscan-clock-divider", &plen);
+ if (pval && plen == sizeof(*pval))
+ clockdiv = *pval;
+ if (!clockdiv)
+ clockdiv = 1;
+
+ if (!clock_name || !strcmp(clock_name, "sys")) {
+ sys_clk = clk_get(&ofdev->dev, "sys_clk");
+ if (!sys_clk) {
+ dev_err(&ofdev->dev, "couldn't get sys_clk\n");
+ goto exit_unmap;
+ }
+ /* Get and round up/down sys clock rate */
+ sys_freq = 1000000 *
+ ((clk_get_rate(sys_clk) + 499999) / 1000000);
+
+ if (!clock_name) {
+ /* A multiple of 16 MHz would be optimal */
+ if ((sys_freq % 16000000) == 0) {
+ clocksrc = 0;
+ clockdiv = sys_freq / 16000000;
+ freq = sys_freq / clockdiv;
+ }
+ } else {
+ clocksrc = 0;
+ freq = sys_freq / clockdiv;
+ }
+ }
+
+ if (clocksrc < 0) {
+ ref_clk = clk_get(&ofdev->dev, "ref_clk");
+ if (!ref_clk) {
+ dev_err(&ofdev->dev, "couldn't get ref_clk\n");
+ goto exit_unmap;
+ }
+ clocksrc = 1;
+ freq = clk_get_rate(ref_clk) / clockdiv;
+ }
+ }
+
+ /* Disable clock */
+ out_be32(&clockctl->mccr[clockidx], 0x0);
+ if (clocksrc >= 0) {
+ /* Set source and divider */
+ val = (clocksrc << 14) | ((clockdiv - 1) << 17);
+ out_be32(&clockctl->mccr[clockidx], val);
+ /* Enable clock */
+ out_be32(&clockctl->mccr[clockidx], val | 0x10000);
+ }
+
+ /* Enable MSCAN clock domain */
+ val = in_be32(&clockctl->sccr[1]);
+ if (!(val & (1 << 25)))
+ out_be32(&clockctl->sccr[1], val | (1 << 25));
+
+ dev_dbg(&ofdev->dev, "using '%s' with frequency divider %d\n",
+ *mscan_clksrc == MSCAN_CLKSRC_IPS ? "ips_clk" :
+ clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv);
+
+exit_unmap:
+ of_node_put(np_clock);
+ iounmap(clockctl);
+
+ return freq;
+}
+#else /* !CONFIG_PPC_MPC512x */
+static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
+ const char *clock_name,
+ int *mscan_clksrc)
+{
+ return 0;
+}
+#endif /* CONFIG_PPC_MPC512x */
static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
const struct of_device_id *id)
{
+ struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data;
struct device_node *np = ofdev->node;
struct net_device *dev;
struct mscan_priv *priv;
void __iomem *base;
- const char *clk_src;
- int err, irq, clock_src;
+ const char *clock_name = NULL;
+ int irq, mscan_clksrc = 0;
+ int err = -ENOMEM;
- base = of_iomap(ofdev->node, 0);
+ base = of_iomap(np, 0);
if (!base) {
dev_err(&ofdev->dev, "couldn't ioremap\n");
- err = -ENOMEM;
- goto exit_release_mem;
+ return err;
}
irq = irq_of_parse_and_map(np, 0);
@@ -114,37 +274,27 @@ static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
}
dev = alloc_mscandev();
- if (!dev) {
- err = -ENOMEM;
+ if (!dev)
goto exit_dispose_irq;
- }
priv = netdev_priv(dev);
priv->reg_base = base;
dev->irq = irq;
- /*
- * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
- * (IP_CLK) can be selected as MSCAN clock source. According to
- * the MPC5200 user's manual, the oscillator clock is the better
- * choice as it has less jitter. For this reason, it is selected
- * by default.
- */
- clk_src = of_get_property(np, "fsl,mscan-clock-source", NULL);
- if (clk_src && strcmp(clk_src, "ip") == 0)
- clock_src = MSCAN_CLKSRC_BUS;
- else
- clock_src = MSCAN_CLKSRC_XTAL;
- priv->can.clock.freq = mpc52xx_can_clock_freq(ofdev, clock_src);
+ clock_name = of_get_property(np, "fsl,mscan-clock-source", NULL);
+
+ BUG_ON(!data);
+ priv->type = data->type;
+ priv->can.clock.freq = data->get_clock(ofdev, clock_name,
+ &mscan_clksrc);
if (!priv->can.clock.freq) {
- dev_err(&ofdev->dev, "couldn't get MSCAN clock frequency\n");
- err = -ENODEV;
+ dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n");
goto exit_free_mscan;
}
SET_NETDEV_DEV(dev, &ofdev->dev);
- err = register_mscandev(dev, clock_src);
+ err = register_mscandev(dev, mscan_clksrc);
if (err) {
dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
DRV_NAME, err);
@@ -164,7 +314,7 @@ exit_dispose_irq:
irq_dispose_mapping(irq);
exit_unmap_mem:
iounmap(base);
-exit_release_mem:
+
return err;
}
@@ -225,8 +375,20 @@ static int mpc5xxx_can_resume(struct of_device *ofdev)
}
#endif
+static struct mpc5xxx_can_data __devinitdata mpc5200_can_data = {
+ .type = MSCAN_TYPE_MPC5200,
+ .get_clock = mpc52xx_can_get_clock,
+};
+
+static struct mpc5xxx_can_data __devinitdata mpc5121_can_data = {
+ .type = MSCAN_TYPE_MPC5121,
+ .get_clock = mpc512x_can_get_clock,
+};
+
static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
- {.compatible = "fsl,mpc5200-mscan"},
+ { .compatible = "fsl,mpc5200-mscan", .data = &mpc5200_can_data, },
+ /* Note that only MPC5121 Rev. 2 (and later) is supported */
+ { .compatible = "fsl,mpc5121-mscan", .data = &mpc5121_can_data, },
{},
};
@@ -255,5 +417,5 @@ static void __exit mpc5xxx_can_exit(void)
module_exit(mpc5xxx_can_exit);
MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
-MODULE_DESCRIPTION("Freescale MPC5200 CAN driver");
+MODULE_DESCRIPTION("Freescale MPC5xxx CAN driver");
MODULE_LICENSE("GPL v2");
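Note on the MPC5121 clock handling added above: the MSCAN clock can come from the "ip", "ref" or "sys" clocks. When the device tree names none of them, mpc512x_can_get_clock() prefers the system clock whenever an integer divider lands exactly on 16 MHz and otherwise falls back to the reference clock; an explicit request simply uses the named clock with the device-tree divider. A sketch of the automatic choice only, assuming the clock rates are already known (hypothetical helper, not the real clk API):

#include <stdio.h>

struct clk_choice {
	const char *name;	/* "sys" or "ref" */
	unsigned int divider;
	unsigned int freq;	/* resulting MSCAN clock in Hz */
};

/* Automatic selection only; an explicit "ip"/"sys"/"ref" request in the
 * driver bypasses this and uses the device-tree divider directly. */
static struct clk_choice pick_clock_auto(unsigned int sys_freq,
					 unsigned int ref_freq)
{
	struct clk_choice c = { "ref", 1, ref_freq };

	if (sys_freq % 16000000 == 0) {	/* a multiple of 16 MHz is optimal */
		c.name = "sys";
		c.divider = sys_freq / 16000000;
		c.freq = sys_freq / c.divider;
	}
	return c;
}

int main(void)
{
	struct clk_choice c = pick_clock_auto(400000000, 33000000);

	printf("%s /%u -> %u Hz\n", c.name, c.divider, c.freq);
	return 0;
}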
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 07346f880ca6..6b7dd578d417 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -4,7 +4,7 @@
* Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
* Varma Electronics Oy
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
- * Copytight (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
+ * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the version 2 of the GNU General Public License
@@ -152,6 +152,12 @@ static int mscan_start(struct net_device *dev)
priv->shadow_canrier = 0;
priv->flags = 0;
+ if (priv->type == MSCAN_TYPE_MPC5121) {
+ /* Clear pending bus-off condition */
+ if (in_8(&regs->canmisc) & MSCAN_BOHOLD)
+ out_8(&regs->canmisc, MSCAN_BOHOLD);
+ }
+
err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
if (err)
return err;
@@ -163,8 +169,29 @@ static int mscan_start(struct net_device *dev)
out_8(&regs->cantier, 0);
/* Enable receive interrupts. */
- out_8(&regs->canrier, MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE |
- MSCAN_RSTATE1 | MSCAN_RSTATE0 | MSCAN_TSTATE1 | MSCAN_TSTATE0);
+ out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
+
+ return 0;
+}
+
+static int mscan_restart(struct net_device *dev)
+{
+ struct mscan_priv *priv = netdev_priv(dev);
+
+ if (priv->type == MSCAN_TYPE_MPC5121) {
+ struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
+ "bus-off state expected");
+ out_8(&regs->canmisc, MSCAN_BOHOLD);
+ /* Re-enable receive interrupts. */
+ out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
+ } else {
+ if (priv->can.state <= CAN_STATE_BUS_OFF)
+ mscan_set_mode(dev, MSCAN_INIT_MODE);
+ return mscan_start(dev);
+ }
return 0;
}
@@ -177,8 +204,8 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i, rtr, buf_id;
u32 can_id;
- if (frame->can_dlc > 8)
- return -EINVAL;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
out_8(&regs->cantier, 0);
@@ -359,9 +386,12 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
* automatically. To avoid that we stop the chip doing
* a light-weight stop (we are in irq-context).
*/
- out_8(&regs->cantier, 0);
- out_8(&regs->canrier, 0);
- setbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
+ if (priv->type != MSCAN_TYPE_MPC5121) {
+ out_8(&regs->cantier, 0);
+ out_8(&regs->canrier, 0);
+ setbits8(&regs->canctl0,
+ MSCAN_SLPRQ | MSCAN_INITRQ);
+ }
can_bus_off(dev);
break;
default:
@@ -491,9 +521,7 @@ static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
switch (mode) {
case CAN_MODE_START:
- if (priv->can.state <= CAN_STATE_BUS_OFF)
- mscan_set_mode(dev, MSCAN_INIT_MODE);
- ret = mscan_start(dev);
+ ret = mscan_restart(dev);
if (ret)
break;
if (netif_queue_stopped(dev))
@@ -592,18 +620,21 @@ static const struct net_device_ops mscan_netdev_ops = {
.ndo_start_xmit = mscan_start_xmit,
};
-int register_mscandev(struct net_device *dev, int clock_src)
+int register_mscandev(struct net_device *dev, int mscan_clksrc)
{
struct mscan_priv *priv = netdev_priv(dev);
struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
u8 ctl1;
ctl1 = in_8(&regs->canctl1);
- if (clock_src)
+ if (mscan_clksrc)
ctl1 |= MSCAN_CLKSRC;
else
ctl1 &= ~MSCAN_CLKSRC;
+ if (priv->type == MSCAN_TYPE_MPC5121)
+ ctl1 |= MSCAN_BORM; /* bus-off recovery upon request */
+
ctl1 |= MSCAN_CANE;
out_8(&regs->canctl1, ctl1);
udelay(100);
@@ -655,6 +686,7 @@ struct net_device *alloc_mscandev(void)
priv->can.bittiming_const = &mscan_bittiming_const;
priv->can.do_set_bittiming = mscan_do_set_bittiming;
priv->can.do_set_mode = mscan_do_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
for (i = 0; i < TX_QUEUE_SIZE; i++) {
priv->tx_queue[i].id = i;
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index 00fc4aaf1ed8..4ff966473bc9 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -38,18 +38,20 @@
#define MSCAN_CLKSRC 0x40
#define MSCAN_LOOPB 0x20
#define MSCAN_LISTEN 0x10
+#define MSCAN_BORM 0x08
#define MSCAN_WUPM 0x04
#define MSCAN_SLPAK 0x02
#define MSCAN_INITAK 0x01
-/* Use the MPC5200 MSCAN variant? */
+/* Use the MPC5XXX MSCAN variant? */
#ifdef CONFIG_PPC
-#define MSCAN_FOR_MPC5200
+#define MSCAN_FOR_MPC5XXX
#endif
-#ifdef MSCAN_FOR_MPC5200
+#ifdef MSCAN_FOR_MPC5XXX
#define MSCAN_CLKSRC_BUS 0
#define MSCAN_CLKSRC_XTAL MSCAN_CLKSRC
+#define MSCAN_CLKSRC_IPS MSCAN_CLKSRC
#else
#define MSCAN_CLKSRC_BUS MSCAN_CLKSRC
#define MSCAN_CLKSRC_XTAL 0
@@ -136,7 +138,7 @@
#define MSCAN_EFF_RTR_SHIFT 0
#define MSCAN_EFF_FLAGS 0x18 /* IDE + SRR */
-#ifdef MSCAN_FOR_MPC5200
+#ifdef MSCAN_FOR_MPC5XXX
#define _MSCAN_RESERVED_(n, num) u8 _res##n[num]
#define _MSCAN_RESERVED_DSR_SIZE 2
#else
@@ -165,67 +167,66 @@ struct mscan_regs {
u8 cantbsel; /* + 0x14 0x0a */
u8 canidac; /* + 0x15 0x0b */
u8 reserved; /* + 0x16 0x0c */
- _MSCAN_RESERVED_(6, 5); /* + 0x17 */
-#ifndef MSCAN_FOR_MPC5200
- u8 canmisc; /* 0x0d */
-#endif
+ _MSCAN_RESERVED_(6, 2); /* + 0x17 */
+ u8 canmisc; /* + 0x19 0x0d */
+ _MSCAN_RESERVED_(7, 2); /* + 0x1a */
u8 canrxerr; /* + 0x1c 0x0e */
u8 cantxerr; /* + 0x1d 0x0f */
- _MSCAN_RESERVED_(7, 2); /* + 0x1e */
+ _MSCAN_RESERVED_(8, 2); /* + 0x1e */
u16 canidar1_0; /* + 0x20 0x10 */
- _MSCAN_RESERVED_(8, 2); /* + 0x22 */
+ _MSCAN_RESERVED_(9, 2); /* + 0x22 */
u16 canidar3_2; /* + 0x24 0x12 */
- _MSCAN_RESERVED_(9, 2); /* + 0x26 */
+ _MSCAN_RESERVED_(10, 2); /* + 0x26 */
u16 canidmr1_0; /* + 0x28 0x14 */
- _MSCAN_RESERVED_(10, 2); /* + 0x2a */
+ _MSCAN_RESERVED_(11, 2); /* + 0x2a */
u16 canidmr3_2; /* + 0x2c 0x16 */
- _MSCAN_RESERVED_(11, 2); /* + 0x2e */
+ _MSCAN_RESERVED_(12, 2); /* + 0x2e */
u16 canidar5_4; /* + 0x30 0x18 */
- _MSCAN_RESERVED_(12, 2); /* + 0x32 */
+ _MSCAN_RESERVED_(13, 2); /* + 0x32 */
u16 canidar7_6; /* + 0x34 0x1a */
- _MSCAN_RESERVED_(13, 2); /* + 0x36 */
+ _MSCAN_RESERVED_(14, 2); /* + 0x36 */
u16 canidmr5_4; /* + 0x38 0x1c */
- _MSCAN_RESERVED_(14, 2); /* + 0x3a */
+ _MSCAN_RESERVED_(15, 2); /* + 0x3a */
u16 canidmr7_6; /* + 0x3c 0x1e */
- _MSCAN_RESERVED_(15, 2); /* + 0x3e */
+ _MSCAN_RESERVED_(16, 2); /* + 0x3e */
struct {
u16 idr1_0; /* + 0x40 0x20 */
- _MSCAN_RESERVED_(16, 2); /* + 0x42 */
+ _MSCAN_RESERVED_(17, 2); /* + 0x42 */
u16 idr3_2; /* + 0x44 0x22 */
- _MSCAN_RESERVED_(17, 2); /* + 0x46 */
+ _MSCAN_RESERVED_(18, 2); /* + 0x46 */
u16 dsr1_0; /* + 0x48 0x24 */
- _MSCAN_RESERVED_(18, 2); /* + 0x4a */
+ _MSCAN_RESERVED_(19, 2); /* + 0x4a */
u16 dsr3_2; /* + 0x4c 0x26 */
- _MSCAN_RESERVED_(19, 2); /* + 0x4e */
+ _MSCAN_RESERVED_(20, 2); /* + 0x4e */
u16 dsr5_4; /* + 0x50 0x28 */
- _MSCAN_RESERVED_(20, 2); /* + 0x52 */
+ _MSCAN_RESERVED_(21, 2); /* + 0x52 */
u16 dsr7_6; /* + 0x54 0x2a */
- _MSCAN_RESERVED_(21, 2); /* + 0x56 */
+ _MSCAN_RESERVED_(22, 2); /* + 0x56 */
u8 dlr; /* + 0x58 0x2c */
- u8:8; /* + 0x59 0x2d */
- _MSCAN_RESERVED_(22, 2); /* + 0x5a */
+ u8 reserved; /* + 0x59 0x2d */
+ _MSCAN_RESERVED_(23, 2); /* + 0x5a */
u16 time; /* + 0x5c 0x2e */
} rx;
- _MSCAN_RESERVED_(23, 2); /* + 0x5e */
+ _MSCAN_RESERVED_(24, 2); /* + 0x5e */
struct {
u16 idr1_0; /* + 0x60 0x30 */
- _MSCAN_RESERVED_(24, 2); /* + 0x62 */
+ _MSCAN_RESERVED_(25, 2); /* + 0x62 */
u16 idr3_2; /* + 0x64 0x32 */
- _MSCAN_RESERVED_(25, 2); /* + 0x66 */
+ _MSCAN_RESERVED_(26, 2); /* + 0x66 */
u16 dsr1_0; /* + 0x68 0x34 */
- _MSCAN_RESERVED_(26, 2); /* + 0x6a */
+ _MSCAN_RESERVED_(27, 2); /* + 0x6a */
u16 dsr3_2; /* + 0x6c 0x36 */
- _MSCAN_RESERVED_(27, 2); /* + 0x6e */
+ _MSCAN_RESERVED_(28, 2); /* + 0x6e */
u16 dsr5_4; /* + 0x70 0x38 */
- _MSCAN_RESERVED_(28, 2); /* + 0x72 */
+ _MSCAN_RESERVED_(29, 2); /* + 0x72 */
u16 dsr7_6; /* + 0x74 0x3a */
- _MSCAN_RESERVED_(29, 2); /* + 0x76 */
+ _MSCAN_RESERVED_(30, 2); /* + 0x76 */
u8 dlr; /* + 0x78 0x3c */
u8 tbpr; /* + 0x79 0x3d */
- _MSCAN_RESERVED_(30, 2); /* + 0x7a */
+ _MSCAN_RESERVED_(31, 2); /* + 0x7a */
u16 time; /* + 0x7c 0x3e */
} tx;
- _MSCAN_RESERVED_(31, 2); /* + 0x7e */
+ _MSCAN_RESERVED_(32, 2); /* + 0x7e */
} __attribute__ ((packed));
#undef _MSCAN_RESERVED_
@@ -237,6 +238,15 @@ struct mscan_regs {
#define MSCAN_POWEROFF_MODE (MSCAN_CSWAI | MSCAN_SLPRQ)
#define MSCAN_SET_MODE_RETRIES 255
#define MSCAN_ECHO_SKB_MAX 3
+#define MSCAN_RX_INTS_ENABLE (MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE | \
+ MSCAN_RSTATE1 | MSCAN_RSTATE0 | \
+ MSCAN_TSTATE1 | MSCAN_TSTATE0)
+
+/* MSCAN type variants */
+enum {
+ MSCAN_TYPE_MPC5200,
+ MSCAN_TYPE_MPC5121
+};
#define BTR0_BRP_MASK 0x3f
#define BTR0_SJW_SHIFT 6
@@ -270,6 +280,7 @@ struct tx_queue_entry {
struct mscan_priv {
struct can_priv can; /* must be the first member */
+ unsigned int type; /* MSCAN type variants */
long open_time;
unsigned long flags;
void __iomem *reg_base; /* ioremap'ed address to registers */
@@ -285,12 +296,7 @@ struct mscan_priv {
};
extern struct net_device *alloc_mscandev(void);
-/*
- * clock_src:
- * 1 = The MSCAN clock source is the onchip Bus Clock.
- * 0 = The MSCAN clock source is the chip Oscillator Clock.
- */
-extern int register_mscandev(struct net_device *dev, int clock_src);
+extern int register_mscandev(struct net_device *dev, int mscan_clksrc);
extern void unregister_mscandev(struct net_device *dev);
#endif /* __MSCAN_H__ */
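Note on the mscan.h hunk above: the _MSCAN_RESERVED_ padding slots are renumbered so that canmisc becomes a real field in every layout (it is needed for the MPC5121 bus-off hold handling). On MPC5xxx builds the macro expands to padding bytes that keep each register at its documented offset; elsewhere it expands to nothing and the registers pack tightly. A compile-time sketch of the same trick, with a cut-down two-register layout and invented offsets:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define WIDE_LAYOUT 1			/* set to 0 for the packed variant */

#if WIDE_LAYOUT
#define RESERVED(n, num) uint8_t _res##n[num];
#else
#define RESERVED(n, num)		/* expands to nothing */
#endif

struct regs {
	uint8_t ctl0;			/* + 0x00 in both layouts */
	RESERVED(1, 3)			/* + 0x01, padding in the wide layout only */
	uint8_t misc;			/* + 0x04 wide, + 0x01 packed */
} __attribute__((packed));

int main(void)
{
	printf("misc offset = %zu\n", offsetof(struct regs, misc));
	return 0;
}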
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 4c674927f247..9e277d64a318 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -44,4 +44,16 @@ config CAN_KVASER_PCI
This driver is for the the PCIcanx and PCIcan cards (1, 2 or
4 channel) from Kvaser (http://www.kvaser.com).
+config CAN_PLX_PCI
+ tristate "PLX90xx PCI-bridge based Cards"
+ depends on PCI
+ ---help---
+ This driver is for CAN interface cards based on
+ the PLX90xx PCI bridge.
+ Driver supports now:
+ - Adlink PCI-7841/cPCI-7841 card (http://www.adlinktech.com/)
+ - Adlink PCI-7841/cPCI-7841 SE card
+ - Marathon CAN-bus-PCI card (http://www.marathon.ru/)
+ - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
+
endif
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index 9d245ac03965..ce924553995d 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
+obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index fd04789d3370..87300606abb9 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -102,7 +102,7 @@ struct ems_pci_card {
#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */
-static struct pci_device_id ems_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ems_pci_tbl) = {
/* CPC-PCI v1 */
{PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,},
/* CPC-PCI v2 */
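
The conversions from open-coded "static struct pci_device_id foo[]" tables to DEFINE_PCI_DEVICE_TABLE(), repeated for many drivers below, are purely declarative. At the time the macro expanded to roughly the following (a sketch from memory of the contemporary include/linux/pci.h, not part of this patch):

    #define DEFINE_PCI_DEVICE_TABLE(_table) \
            const struct pci_device_id _table[] __devinitconst

so a table such as ems_pci_tbl keeps its entries unchanged but becomes const and is annotated for the init-time data section.
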
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 7dd7769b9713..441e776a7f59 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -109,7 +109,7 @@ struct kvaser_pci {
#define KVASER_PCI_VENDOR_ID2 0x1a07 /* the PCI device and vendor IDs */
#define KVASER_PCI_DEVICE_ID2 0x0008
-static struct pci_device_id kvaser_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(kvaser_pci_tbl) = {
{KVASER_PCI_VENDOR_ID1, KVASER_PCI_DEVICE_ID1, PCI_ANY_ID, PCI_ANY_ID,},
{KVASER_PCI_VENDOR_ID2, KVASER_PCI_DEVICE_ID2, PCI_ANY_ID, PCI_ANY_ID,},
{ 0,}
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
new file mode 100644
index 000000000000..6b46a6395f80
--- /dev/null
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -0,0 +1,472 @@
+/*
+ * Copyright (C) 2008-2010 Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>
+ *
+ * Derived from the ems_pci.c driver:
+ * Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com>
+ * Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/io.h>
+
+#include "sja1000.h"
+
+#define DRV_NAME "sja1000_plx_pci"
+
+MODULE_AUTHOR("Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>");
+MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with "
+ "the SJA1000 chips");
+MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
+ "Adlink PCI-7841/cPCI-7841 SE, "
+ "Marathon CAN-bus-PCI, "
+ "TEWS TECHNOLOGIES TPMC810");
+MODULE_LICENSE("GPL v2");
+
+#define PLX_PCI_MAX_CHAN 2
+
+struct plx_pci_card {
+ int channels; /* detected channels count */
+ struct net_device *net_dev[PLX_PCI_MAX_CHAN];
+ void __iomem *conf_addr;
+};
+
+#define PLX_PCI_CAN_CLOCK (16000000 / 2)
+
+/* PLX90xx registers */
+#define PLX_INTCSR 0x4c /* Interrupt Control/Status */
+#define PLX_CNTRL 0x50 /* User I/O, Direct Slave Response,
+ * Serial EEPROM, and Initialization
+ * Control register
+ */
+
+#define PLX_LINT1_EN 0x1 /* Local interrupt 1 enable */
+#define PLX_LINT2_EN (1 << 3) /* Local interrupt 2 enable */
+#define PLX_PCI_INT_EN (1 << 6) /* PCI Interrupt Enable */
+#define PLX_PCI_RESET (1 << 30) /* PCI Adapter Software Reset */
+
+/*
+ * The board configuration is probably the following:
+ * RX1 is connected to ground.
+ * TX1 is not connected.
+ * CLKO is not connected.
+ * Setting the OCR register to 0xDA is a good idea.
+ * This means normal output mode, push-pull and the correct polarity.
+ */
+#define PLX_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL)
+
+/*
+ * In the CDR register, you should set CBP to 1.
+ * You will probably also want to set the clock divider value to 7
+ * (meaning direct oscillator output) because the second SJA1000 chip
+ * is driven by the first one's CLKOUT output.
+ */
+#define PLX_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
+
+/* SJA1000 Control Register in the BasicCAN Mode */
+#define REG_CR 0x00
+
+/* States of some SJA1000 registers after hardware reset in the BasicCAN mode */
+#define REG_CR_BASICCAN_INITIAL 0x21
+#define REG_CR_BASICCAN_INITIAL_MASK 0xa1
+#define REG_SR_BASICCAN_INITIAL 0x0c
+#define REG_IR_BASICCAN_INITIAL 0xe0
+
+/* States of some SJA1000 registers after hardware reset in the PeliCAN mode */
+#define REG_MOD_PELICAN_INITIAL 0x01
+#define REG_SR_PELICAN_INITIAL 0x3c
+#define REG_IR_PELICAN_INITIAL 0x00
+
+#define ADLINK_PCI_VENDOR_ID 0x144A
+#define ADLINK_PCI_DEVICE_ID 0x7841
+
+#define MARATHON_PCI_DEVICE_ID 0x2715
+
+#define TEWS_PCI_VENDOR_ID 0x1498
+#define TEWS_PCI_DEVICE_ID_TMPC810 0x032A
+
+static void plx_pci_reset_common(struct pci_dev *pdev);
+static void plx_pci_reset_marathon(struct pci_dev *pdev);
+
+struct plx_pci_channel_map {
+ u32 bar;
+ u32 offset;
+ u32 size; /* 0x00 - auto, i.e. the length of the entire BAR */
+};
+
+struct plx_pci_card_info {
+ const char *name;
+ int channel_count;
+ u32 can_clock;
+ u8 ocr; /* output control register */
+ u8 cdr; /* clock divider register */
+
+ /* Parameters for mapping local configuration space */
+ struct plx_pci_channel_map conf_map;
+
+ /* Parameters for mapping the SJA1000 chips */
+ struct plx_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CHAN];
+
+ /* Pointer to device-dependent reset function */
+ void (*reset_func)(struct pci_dev *pdev);
+};
+
+static struct plx_pci_card_info plx_pci_card_info_adlink __devinitdata = {
+ "Adlink PCI-7841/cPCI-7841", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {1, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9052 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_adlink_se __devinitdata = {
+ "Adlink PCI-7841/cPCI-7841 SE", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9052 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = {
+ "Marathon CAN-bus-PCI", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} },
+ &plx_pci_reset_marathon
+ /* based on PLX9052 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_tews __devinitdata = {
+ "TEWS TECHNOLOGIES TPMC810", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x000, 0x80}, {2, 0x100, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9030 */
+};
+
+static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
+ {
+ /* Adlink PCI-7841/cPCI-7841 */
+ ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_NETWORK_OTHER << 8, ~0,
+ (kernel_ulong_t)&plx_pci_card_info_adlink
+ },
+ {
+ /* Adlink PCI-7841/cPCI-7841 SE */
+ ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_OTHER << 8, ~0,
+ (kernel_ulong_t)&plx_pci_card_info_adlink_se
+ },
+ {
+ /* Marathon CAN-bus-PCI card */
+ PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_marathon
+ },
+ {
+ /* TEWS TECHNOLOGIES TPMC810 card */
+ TEWS_PCI_VENDOR_ID, TEWS_PCI_DEVICE_ID_TMPC810,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_tews
+ },
+ { 0,}
+};
+MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
+
+static u8 plx_pci_read_reg(const struct sja1000_priv *priv, int port)
+{
+ return ioread8(priv->reg_base + port);
+}
+
+static void plx_pci_write_reg(const struct sja1000_priv *priv, int port, u8 val)
+{
+ iowrite8(val, priv->reg_base + port);
+}
+
+/*
+ * Check if a CAN controller is present at the specified location
+ * by trying to switch it from the Basic mode into the PeliCAN mode.
+ * Also check states of some registers in reset mode.
+ */
+static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
+{
+ int flag = 0;
+
+ /*
+ * Check registers after hardware reset (the Basic mode)
+ * See states on p. 10 of the Datasheet.
+ */
+ if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
+ REG_CR_BASICCAN_INITIAL &&
+ (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) &&
+ (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL))
+ flag = 1;
+
+ /* Bring the SJA1000 into the PeliCAN mode */
+ priv->write_reg(priv, REG_CDR, CDR_PELICAN);
+
+ /*
+ * Check registers after reset in the PeliCAN mode.
+ * See states on p. 23 of the Datasheet.
+ */
+ if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL &&
+ priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL &&
+ priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL)
+ return flag;
+
+ return 0;
+}
+
+/*
+ * PLX90xx software reset
+ * This also asserts LRESET#, resetting any device on the Local Bus (if wired).
+ * For most cards this is enough to reset the SJA1000 chips.
+ */
+static void plx_pci_reset_common(struct pci_dev *pdev)
+{
+ struct plx_pci_card *card = pci_get_drvdata(pdev);
+ u32 cntrl;
+
+ cntrl = ioread32(card->conf_addr + PLX_CNTRL);
+ cntrl |= PLX_PCI_RESET;
+ iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
+ udelay(100);
+ cntrl ^= PLX_PCI_RESET;
+ iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
+}
+
+/* Special reset function for Marathon card */
+static void plx_pci_reset_marathon(struct pci_dev *pdev)
+{
+ void __iomem *reset_addr;
+ int i;
+ int reset_bar[2] = {3, 5};
+
+ plx_pci_reset_common(pdev);
+
+ for (i = 0; i < 2; i++) {
+ reset_addr = pci_iomap(pdev, reset_bar[i], 0);
+ if (!reset_addr) {
+ dev_err(&pdev->dev, "Failed to remap reset "
+ "space %d (BAR%d)\n", i, reset_bar[i]);
+ } else {
+ /* reset the SJA1000 chip */
+ iowrite8(0x1, reset_addr);
+ udelay(100);
+ pci_iounmap(pdev, reset_addr);
+ }
+ }
+}
+
+static void plx_pci_del_card(struct pci_dev *pdev)
+{
+ struct plx_pci_card *card = pci_get_drvdata(pdev);
+ struct net_device *dev;
+ struct sja1000_priv *priv;
+ int i = 0;
+
+ for (i = 0; i < card->channels; i++) {
+ dev = card->net_dev[i];
+ if (!dev)
+ continue;
+
+ dev_info(&pdev->dev, "Removing %s\n", dev->name);
+ unregister_sja1000dev(dev);
+ priv = netdev_priv(dev);
+ if (priv->reg_base)
+ pci_iounmap(pdev, priv->reg_base);
+ free_sja1000dev(dev);
+ }
+
+ plx_pci_reset_common(pdev);
+
+ /*
+ * Disable interrupts from PCI-card (PLX90xx) and disable Local_1,
+ * Local_2 interrupts
+ */
+ iowrite32(0x0, card->conf_addr + PLX_INTCSR);
+
+ if (card->conf_addr)
+ pci_iounmap(pdev, card->conf_addr);
+
+ kfree(card);
+
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+/*
+ * Probe PLX90xx based device for the SJA1000 chips and register each
+ * available CAN channel with the SJA1000 Socket-CAN subsystem.
+ */
+static int __devinit plx_pci_add_card(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct sja1000_priv *priv;
+ struct net_device *dev;
+ struct plx_pci_card *card;
+ struct plx_pci_card_info *ci;
+ int err, i;
+ u32 val;
+ void __iomem *addr;
+
+ ci = (struct plx_pci_card_info *)ent->driver_data;
+
+ if (pci_enable_device(pdev) < 0) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return -ENODEV;
+ }
+
+ dev_info(&pdev->dev, "Detected \"%s\" card at slot #%i\n",
+ ci->name, PCI_SLOT(pdev->devfn));
+
+ /* Allocate card structures to hold addresses, ... */
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
+ if (!card) {
+ dev_err(&pdev->dev, "Unable to allocate memory\n");
+ pci_disable_device(pdev);
+ return -ENOMEM;
+ }
+
+ pci_set_drvdata(pdev, card);
+
+ card->channels = 0;
+
+ /* Remap PLX90xx configuration space */
+ addr = pci_iomap(pdev, ci->conf_map.bar, ci->conf_map.size);
+ if (!addr) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to remap configuration space "
+ "(BAR%d)\n", ci->conf_map.bar);
+ goto failure_cleanup;
+ }
+ card->conf_addr = addr + ci->conf_map.offset;
+
+ ci->reset_func(pdev);
+
+ /* Detect available channels */
+ for (i = 0; i < ci->channel_count; i++) {
+ struct plx_pci_channel_map *cm = &ci->chan_map_tbl[i];
+
+ dev = alloc_sja1000dev(0);
+ if (!dev) {
+ err = -ENOMEM;
+ goto failure_cleanup;
+ }
+
+ card->net_dev[i] = dev;
+ priv = netdev_priv(dev);
+ priv->priv = card;
+ priv->irq_flags = IRQF_SHARED;
+
+ dev->irq = pdev->irq;
+
+ /*
+ * Remap IO space of the SJA1000 chips
+ * This is device-dependent mapping
+ */
+ addr = pci_iomap(pdev, cm->bar, cm->size);
+ if (!addr) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to remap BAR%d\n", cm->bar);
+ goto failure_cleanup;
+ }
+
+ priv->reg_base = addr + cm->offset;
+ priv->read_reg = plx_pci_read_reg;
+ priv->write_reg = plx_pci_write_reg;
+
+ /* Check if channel is present */
+ if (plx_pci_check_sja1000(priv)) {
+ priv->can.clock.freq = ci->can_clock;
+ priv->ocr = ci->ocr;
+ priv->cdr = ci->cdr;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* Register SJA1000 device */
+ err = register_sja1000dev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Registering device failed "
+ "(err=%d)\n", err);
+ free_sja1000dev(dev);
+ goto failure_cleanup;
+ }
+
+ card->channels++;
+
+ dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d "
+ "registered as %s\n", i + 1, priv->reg_base,
+ dev->irq, dev->name);
+ } else {
+ dev_err(&pdev->dev, "Channel #%d not detected\n",
+ i + 1);
+ free_sja1000dev(dev);
+ }
+ }
+
+ if (!card->channels) {
+ err = -ENODEV;
+ goto failure_cleanup;
+ }
+
+ /*
+ * Enable interrupts from PCI-card (PLX90xx) and enable Local_1,
+ * Local_2 interrupts from the SJA1000 chips
+ */
+ val = ioread32(card->conf_addr + PLX_INTCSR);
+ val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
+ iowrite32(val, card->conf_addr + PLX_INTCSR);
+
+ return 0;
+
+failure_cleanup:
+ dev_err(&pdev->dev, "Error: %d. Cleaning Up.\n", err);
+
+ plx_pci_del_card(pdev);
+
+ return err;
+}
+
+static struct pci_driver plx_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = plx_pci_tbl,
+ .probe = plx_pci_add_card,
+ .remove = plx_pci_del_card,
+};
+
+static int __init plx_pci_init(void)
+{
+ return pci_register_driver(&plx_pci_driver);
+}
+
+static void __exit plx_pci_exit(void)
+{
+ pci_unregister_driver(&plx_pci_driver);
+}
+
+module_init(plx_pci_init);
+module_exit(plx_pci_exit);
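
Adding support for another PLX90xx based board to this driver would follow the pattern used above: describe the card in a plx_pci_card_info structure and point the driver_data of a new PCI ID table entry at it. A minimal sketch, assuming a hypothetical single-channel card (the 0x1234/0x5678 vendor/device IDs and the card name are invented for illustration):

    static struct plx_pci_card_info plx_pci_card_info_example __devinitdata = {
            "Example single-channel card", 1,
            PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
            {0, 0x00, 0x00}, { {2, 0x00, 0x80} },   /* config in BAR0, SJA1000 in BAR2 */
            &plx_pci_reset_common
    };

    /* additional entry for plx_pci_tbl[] */
    {
            0x1234, 0x5678,                         /* hypothetical vendor/device ID */
            PCI_ANY_ID, PCI_ANY_ID,
            0, 0,
            (kernel_ulong_t)&plx_pci_card_info_example
    },

plx_pci_add_card() then picks the structure up through ent->driver_data and probes each mapped SJA1000 with plx_pci_check_sja1000() before registering it.
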
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 542a4f7255b4..ace103a44833 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -249,6 +249,9 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
uint8_t dreg;
int i;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
netif_stop_queue(dev);
fi = dlc = cf->can_dlc;
@@ -564,6 +567,7 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
priv->can.bittiming_const = &sja1000_bittiming_const;
priv->can.do_set_bittiming = sja1000_set_bittiming;
priv->can.do_set_mode = sja1000_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
if (sizeof_priv)
priv->priv = (void *)priv + sizeof(struct sja1000_priv);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 5c993c2da528..8332e242b0be 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -477,6 +477,9 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 mbxno, mbx_mask, data;
unsigned long flags;
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
mbxno = get_tx_head_mb(priv);
mbx_mask = BIT(mbxno);
spin_lock_irqsave(&priv->mbx_lock, flags);
@@ -491,7 +494,6 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_unlock_irqrestore(&priv->mbx_lock, flags);
/* Prepare mailbox for transmission */
- data = min_t(u8, cf->can_dlc, 8);
if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
data |= HECC_CANMCF_RTR;
data |= get_tx_head_prio(priv) << 8;
@@ -907,6 +909,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv->can.bittiming_const = &ti_hecc_bittiming_const;
priv->can.do_set_mode = ti_hecc_do_set_mode;
priv->can.do_get_state = ti_hecc_get_state;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
ndev->irq = irq->start;
ndev->flags |= IFF_ECHO;
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index bbc78e0b8a15..97ff6febad63 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -5,6 +5,6 @@ config CAN_EMS_USB
tristate "EMS CPC-USB/ARM7 CAN/USB interface"
---help---
This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
- from from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
+ from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
endmenu
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index efbb05c71bf4..11c87840cc00 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -767,6 +767,9 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN
+ sizeof(struct cpc_can_msg);
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
/* create a URB, and a buffer for it, and copy the data to the URB */
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
@@ -1019,8 +1022,7 @@ static int ems_usb_probe(struct usb_interface *intf,
dev->can.bittiming_const = &ems_usb_bittiming_const;
dev->can.do_set_bittiming = ems_usb_set_bittiming;
dev->can.do_set_mode = ems_usb_set_mode;
-
- netdev->flags |= IFF_ECHO; /* we support local echo */
+ dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
netdev->netdev_ops = &ems_usb_netdev_ops;
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 80ac56313981..d124d837ae58 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -47,6 +47,7 @@
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/can.h>
+#include <linux/can/dev.h>
#include <net/rtnetlink.h>
static __initdata const char banner[] =
@@ -70,10 +71,11 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
{
+ struct can_frame *cf = (struct can_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ stats->rx_bytes += cf->can_dlc;
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
@@ -85,11 +87,15 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
+ struct can_frame *cf = (struct can_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
int loop;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
stats->tx_packets++;
- stats->tx_bytes += skb->len;
+ stats->tx_bytes += cf->can_dlc;
/* set flag whether this packet has to be looped back */
loop = skb->pkt_type == PACKET_LOOPBACK;
@@ -103,7 +109,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
* CAN core already did the echo for us
*/
stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ stats->rx_bytes += cf->can_dlc;
}
kfree_skb(skb);
return NETDEV_TX_OK;
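
Several transmit paths in this series (sja1000, ti_hecc, ems_usb and vcan above) now start with can_dropped_invalid_skb(), which frees and counts malformed CAN frames before they reach the hardware. Its behaviour is approximately the following (an illustrative sketch; the authoritative helper lives in include/linux/can/dev.h):

    /* approximate behaviour of can_dropped_invalid_skb() */
    static inline int can_dropped_invalid_skb(struct net_device *dev,
                                              struct sk_buff *skb)
    {
            const struct can_frame *cf = (struct can_frame *)skb->data;

            if (unlikely(skb->len != sizeof(*cf) || cf->can_dlc > 8)) {
                    kfree_skb(skb);                 /* drop the malformed frame */
                    dev->stats.tx_dropped++;
                    return 1;                       /* caller returns NETDEV_TX_OK */
            }
            return 0;                               /* frame is well-formed */
    }

Returning NETDEV_TX_OK after the helper has consumed the skb keeps the queue running instead of retrying a frame that can never be sent.
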
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index f857afe8e488..ad47e5126fde 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -106,7 +106,7 @@
#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define CAS_NCPUS num_online_cpus()
-#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL)
+#ifdef CONFIG_CASSINI_NAPI
#define USE_NAPI
#define cas_skb_release(x) netif_receive_skb(x)
#else
@@ -236,7 +236,7 @@ static u16 link_modes[] __devinitdata = {
CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};
-static struct pci_device_id cas_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 699d22c5fe09..f6462b54f823 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -334,7 +334,7 @@ static inline int t1_is_asic(const adapter_t *adapter)
return adapter->params.is_asic;
}
-extern struct pci_device_id t1_pci_tbl[];
+extern const struct pci_device_id t1_pci_tbl[];
static inline int adapter_matches_type(const adapter_t *adapter,
int version, int revision)
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 17720c6e5bfe..2402d372c886 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -528,7 +528,7 @@ static const struct board_info t1_board[] = {
};
-struct pci_device_id t1_pci_tbl[] = {
+DEFINE_PCI_DEVICE_TABLE(t1_pci_tbl) = {
CH_DEVICE(8, 0, CH_BRD_T110_1CU),
CH_DEVICE(8, 1, CH_BRD_T110_1CU),
CH_DEVICE(7, 0, CH_BRD_N110_1F),
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index af9321617ce4..0e79cef95c0a 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1325,8 +1325,7 @@ net_open(struct net_device *dev)
write_irq(dev, lp->chip_type, dev->irq);
ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev);
if (ret) {
- if (net_debug)
- printk(KERN_DEBUG "cs89x0: request_irq(%d) failed\n", dev->irq);
+ printk(KERN_ERR "cs89x0: request_irq(%d) failed\n", dev->irq);
goto bad_out;
}
}
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 89bec9c3c141..73622f5312cb 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -80,7 +80,7 @@ enum {
#define CH_DEVICE(devid, idx) \
{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
-static const struct pci_device_id cxgb3_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
CH_DEVICE(0x20, 0), /* PE9000 */
CH_DEVICE(0x21, 1), /* T302E */
CH_DEVICE(0x22, 2), /* T310E */
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index bdbd14727e4b..048205903741 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -480,6 +480,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
if (q->pend_cred >= q->credits / 4) {
q->pend_cred = 0;
+ wmb();
t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}
}
@@ -2079,6 +2080,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
struct sge_fl *fl, int len, int complete)
{
struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+ struct port_info *pi = netdev_priv(qs->netdev);
struct sk_buff *skb = NULL;
struct cpl_rx_pkt *cpl;
struct skb_frag_struct *rx_frag;
@@ -2116,11 +2118,18 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
if (!nr_frags) {
offset = 2 + sizeof(struct cpl_rx_pkt);
- qs->lro_va = sd->pg_chunk.va + 2;
- }
- len -= offset;
+ cpl = qs->lro_va = sd->pg_chunk.va + 2;
+
+ if ((pi->rx_offload & T3_RX_CSUM) &&
+ cpl->csum_valid && cpl->csum == htons(0xffff)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+ } else
+ skb->ip_summed = CHECKSUM_NONE;
+ } else
+ cpl = qs->lro_va;
- prefetch(qs->lro_va);
+ len -= offset;
rx_frag += nr_frags;
rx_frag->page = sd->pg_chunk.page;
@@ -2136,12 +2145,8 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
return;
skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- cpl = qs->lro_va;
if (unlikely(cpl->vlan_valid)) {
- struct net_device *dev = qs->netdev;
- struct port_info *pi = netdev_priv(dev);
struct vlan_group *grp = pi->vlan_grp;
if (likely(grp != NULL)) {
@@ -2282,11 +2287,14 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
while (likely(budget_left && is_new_response(r, q))) {
int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
struct sk_buff *skb = NULL;
- u32 len, flags = ntohl(r->flags);
- __be32 rss_hi = *(const __be32 *)r,
- rss_lo = r->rss_hdr.rss_hash_val;
+ u32 len, flags;
+ __be32 rss_hi, rss_lo;
+ rmb();
eth = r->rss_hdr.opcode == CPL_RX_PKT;
+ rss_hi = *(const __be32 *)r;
+ rss_lo = r->rss_hdr.rss_hash_val;
+ flags = ntohl(r->flags);
if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
@@ -2497,7 +2505,10 @@ static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
refill_rspq(adap, q, q->credits);
q->credits = 0;
}
- } while (is_new_response(r, q) && is_pure_response(r));
+ if (!is_new_response(r, q))
+ break;
+ rmb();
+ } while (is_pure_response(r));
if (sleeping)
check_ring_db(adap, qs, sleeping);
@@ -2531,6 +2542,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
if (!is_new_response(r, q))
return -1;
+ rmb();
if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
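
The wmb()/rmb() additions in sge.c follow the usual descriptor-ring ordering rules: descriptor contents must be globally visible before the doorbell write that hands them to the hardware, and the fields of a response descriptor may only be read after the check that the descriptor is new. In schematic form (illustrative only; the register and field names here are placeholders, not the driver's):

    /* producer side: publish the descriptor before ringing the doorbell */
    desc->addr = cpu_to_be64(dma_addr);
    desc->len  = cpu_to_be32(len);
    wmb();                          /* descriptor writes before doorbell */
    writel(doorbell_val, doorbell_reg);

    /* consumer side: order the generation-bit check before the payload reads */
    if (is_new_response(r, q)) {
            rmb();                  /* gen bit read before flags/rss fields */
            flags = ntohl(r->flags);
            /* ... process the response ... */
    }

Without the rmb(), a weakly ordered CPU could read r->flags speculatively, before the generation bit, and process a stale response.
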
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 34e03104c3c1..faffad409985 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -2672,8 +2672,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
size = res->end - res->start + 1;
if (!request_mem_region(res->start, size, ndev->name)) {
- dev_err(emac_dev, "DaVinci EMAC: failed request_mem_region() \
- for regs\n");
+ dev_err(emac_dev, "DaVinci EMAC: failed request_mem_region() for regs\n");
rc = -ENXIO;
goto probe_quit;
}
@@ -2711,6 +2710,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
SET_ETHTOOL_OPS(ndev, &ethtool_ops);
netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
+ clk_enable(emac_clk);
+
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
rc = register_netdev(ndev);
@@ -2720,7 +2721,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
goto netdev_reg_err;
}
- clk_enable(emac_clk);
/* MII/Phy intialisation, mdio bus registration */
emac_mii = mdiobus_alloc();
@@ -2760,6 +2760,7 @@ mdiobus_quit:
netdev_reg_err:
mdio_alloc_err:
+ clk_disable(emac_clk);
no_irq_res:
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, res->end - res->start + 1);
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index 6a6ea038d7a3..98da085445e6 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -1052,12 +1052,9 @@ static int __devinit dfx_driver_init(struct net_device *dev,
board_name = "DEFEA";
if (dfx_bus_pci)
board_name = "DEFPA";
- pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, "
- "Hardware addr = %02X-%02X-%02X-%02X-%02X-%02X\n",
+ pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
print_name, board_name, dfx_use_mmio ? "" : "I/O ",
- (long long)bar_start, dev->irq,
- dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
- dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ (long long)bar_start, dev->irq, dev->dev_addr);
/*
* Get memory for descriptor block, consumer block, and other buffers
@@ -3631,7 +3628,7 @@ static int __devinit dfx_pci_register(struct pci_dev *,
const struct pci_device_id *);
static void __devexit dfx_pci_unregister(struct pci_dev *);
-static struct pci_device_id dfx_pci_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(dfx_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
{ }
};
diff --git a/drivers/net/dl2k.h b/drivers/net/dl2k.h
index 266ec8777ca8..7caab3d26a9e 100644
--- a/drivers/net/dl2k.h
+++ b/drivers/net/dl2k.h
@@ -537,7 +537,7 @@ struct netdev_private {
driver_data Data private to the driver.
*/
-static const struct pci_device_id rio_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rio_pci_tbl) = {
{0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, },
{0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, },
{ }
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 839fb2b136d3..5c7a155e849a 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -208,7 +208,7 @@ MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
-static struct pci_device_id e100_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 2a567df3ea71..9902b33b7160 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -326,6 +326,8 @@ struct e1000_adapter {
/* for ioport free */
int bars;
int need_ioport;
+
+ bool discarding;
};
enum e1000_state_t {
@@ -347,6 +349,7 @@ extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
extern void e1000_update_stats(struct e1000_adapter *adapter);
+extern bool e1000_has_link(struct e1000_adapter *adapter);
extern void e1000_power_up_phy(struct e1000_adapter *);
extern void e1000_set_ethtool_ops(struct net_device *netdev);
extern void e1000_check_options(struct e1000_adapter *adapter);
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 13e9ece16889..c67e93117271 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -215,6 +215,23 @@ static int e1000_set_settings(struct net_device *netdev,
return 0;
}
+static u32 e1000_get_link(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ /*
+ * If the link is not reported up to netdev, interrupts are disabled,
+ * and so the physical link state may have changed since we last
+ * looked. Set get_link_status to make sure that the true link
+ * state is interrogated, rather than pulling a cached and possibly
+ * stale link state from the driver.
+ */
+ if (!netif_carrier_ok(netdev))
+ adapter->hw.get_link_status = 1;
+
+ return e1000_has_link(adapter);
+}
+
static void e1000_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
@@ -1892,7 +1909,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_msglevel = e1000_get_msglevel,
.set_msglevel = e1000_set_msglevel,
.nway_reset = e1000_nway_reset,
- .get_link = ethtool_op_get_link,
+ .get_link = e1000_get_link,
.get_eeprom_len = e1000_get_eeprom_len,
.get_eeprom = e1000_get_eeprom,
.set_eeprom = e1000_set_eeprom,
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 7e855f9bbd97..3b14dd718ab4 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -42,7 +42,7 @@ static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation
* Macro expands to...
* {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
*/
-static struct pci_device_id e1000_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
INTEL_E1000_ETHERNET_DEVICE(0x1000),
INTEL_E1000_ETHERNET_DEVICE(0x1001),
INTEL_E1000_ETHERNET_DEVICE(0x1004),
@@ -847,6 +847,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
goto err_pci_reg;
pci_set_master(pdev);
+ err = pci_save_state(pdev);
+ if (err)
+ goto err_alloc_etherdev;
err = -ENOMEM;
netdev = alloc_etherdev(sizeof(struct e1000_adapter));
@@ -1698,18 +1701,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
rctl &= ~E1000_RCTL_SZ_4096;
rctl |= E1000_RCTL_BSEX;
switch (adapter->rx_buffer_len) {
- case E1000_RXBUFFER_256:
- rctl |= E1000_RCTL_SZ_256;
- rctl &= ~E1000_RCTL_BSEX;
- break;
- case E1000_RXBUFFER_512:
- rctl |= E1000_RCTL_SZ_512;
- rctl &= ~E1000_RCTL_BSEX;
- break;
- case E1000_RXBUFFER_1024:
- rctl |= E1000_RCTL_SZ_1024;
- rctl &= ~E1000_RCTL_BSEX;
- break;
case E1000_RXBUFFER_2048:
default:
rctl |= E1000_RCTL_SZ_2048;
@@ -2139,7 +2130,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
rctl |= E1000_RCTL_VFE;
}
- if (netdev->uc.count > rar_entries - 1) {
+ if (netdev_uc_count(netdev) > rar_entries - 1) {
rctl |= E1000_RCTL_UPE;
} else if (!(netdev->flags & IFF_PROMISC)) {
rctl &= ~E1000_RCTL_UPE;
@@ -2162,7 +2153,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
*/
i = 1;
if (use_uc)
- list_for_each_entry(ha, &netdev->uc.list, list) {
+ netdev_for_each_uc_addr(ha, netdev) {
if (i == rar_entries)
break;
e1000_rar_set(hw, ha->addr, i++);
@@ -2258,7 +2249,7 @@ static void e1000_82547_tx_fifo_stall(unsigned long data)
}
}
-static bool e1000_has_link(struct e1000_adapter *adapter)
+bool e1000_has_link(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
bool link_active = false;
@@ -2802,13 +2793,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
dma_error:
dev_err(&pdev->dev, "TX DMA map failed\n");
buffer_info->dma = 0;
- count--;
-
- while (count >= 0) {
+ if (count)
count--;
- i--;
- if (i < 0)
+
+ while (count--) {
+ if (i == 0)
i += tx_ring->count;
+ i--;
buffer_info = &tx_ring->buffer_info[i];
e1000_unmap_and_free_tx_resource(adapter, buffer_info);
}
@@ -3176,13 +3167,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* however with the new *_jumbo_rx* routines, jumbo receives will use
* fragmented skbs */
- if (max_frame <= E1000_RXBUFFER_256)
- adapter->rx_buffer_len = E1000_RXBUFFER_256;
- else if (max_frame <= E1000_RXBUFFER_512)
- adapter->rx_buffer_len = E1000_RXBUFFER_512;
- else if (max_frame <= E1000_RXBUFFER_1024)
- adapter->rx_buffer_len = E1000_RXBUFFER_1024;
- else if (max_frame <= E1000_RXBUFFER_2048)
+ if (max_frame <= E1000_RXBUFFER_2048)
adapter->rx_buffer_len = E1000_RXBUFFER_2048;
else
#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
@@ -3850,13 +3835,22 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
length = le16_to_cpu(rx_desc->length);
/* !EOP means multiple descriptors were used to store a single
- * packet, also make sure the frame isn't just CRC only */
- if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
+ * packet; if that's the case we need to toss it. In fact, we need
+ * to toss every packet with the EOP bit clear, as well as the next
+ * frame that _does_ have the EOP bit set, since it is by
+ * definition only a frame fragment.
+ */
+ if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+ adapter->discarding = true;
+
+ if (adapter->discarding) {
/* All receives must fit into a single buffer */
E1000_DBG("%s: Receive packet consumed multiple"
" buffers\n", netdev->name);
/* recycle */
buffer_info->skb = skb;
+ if (status & E1000_RXD_STAT_EOP)
+ adapter->discarding = false;
goto next_desc;
}
@@ -4605,6 +4599,7 @@ static int e1000_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
+ pci_save_state(pdev);
if (adapter->need_ioport)
err = pci_enable_device(pdev);
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index b979464091bb..3c95acb3a87d 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -237,6 +237,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
? true : false;
+ /* Adaptive IFS supported */
+ mac->adaptive_ifs = true;
/* check for link */
switch (hw->phy.media_type) {
@@ -265,8 +267,14 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
}
switch (hw->mac.type) {
+ case e1000_82573:
+ func->set_lan_id = e1000_set_lan_id_single_port;
+ func->check_mng_mode = e1000e_check_mng_mode_generic;
+ func->led_on = e1000e_led_on_generic;
+ break;
case e1000_82574:
case e1000_82583:
+ func->set_lan_id = e1000_set_lan_id_single_port;
func->check_mng_mode = e1000_check_mng_mode_82574;
func->led_on = e1000_led_on_82574;
break;
@@ -920,9 +928,12 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
ew32(IMC, 0xffffffff);
icr = er32(ICR);
- if (hw->mac.type == e1000_82571 &&
- hw->dev_spec.e82571.alt_mac_addr_is_present)
- e1000e_set_laa_state_82571(hw, true);
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ e1000e_set_laa_state_82571(hw, true);
/* Reinitialize the 82571 serdes link state machine */
if (hw->phy.media_type == e1000_media_type_internal_serdes)
@@ -1223,32 +1234,6 @@ static s32 e1000_led_on_82574(struct e1000_hw *hw)
}
/**
- * e1000_update_mc_addr_list_82571 - Update Multicast addresses
- * @hw: pointer to the HW structure
- * @mc_addr_list: array of multicast addresses to program
- * @mc_addr_count: number of multicast addresses to program
- * @rar_used_count: the first RAR register free to program
- * @rar_count: total number of supported Receive Address Registers
- *
- * Updates the Receive Address Registers and Multicast Table Array.
- * The caller must have a packed mc_addr_list of multicast addresses.
- * The parameter rar_count will usually be hw->mac.rar_entry_count
- * unless there are workarounds that change this.
- **/
-static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw,
- u8 *mc_addr_list,
- u32 mc_addr_count,
- u32 rar_used_count,
- u32 rar_count)
-{
- if (e1000e_get_laa_state_82571(hw))
- rar_count--;
-
- e1000e_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count,
- rar_used_count, rar_count);
-}
-
-/**
* e1000_setup_link_82571 - Setup flow control and link settings
* @hw: pointer to the HW structure
*
@@ -1619,6 +1604,29 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
}
/**
+ * e1000_read_mac_addr_82571 - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ /*
+ * If there's an alternate MAC address, place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_power_down_phy_copper_82571 - Remove link during PHY power down
* @hw: pointer to the HW structure
*
@@ -1693,10 +1701,11 @@ static struct e1000_mac_operations e82571_mac_ops = {
.cleanup_led = e1000e_cleanup_led_generic,
.clear_hw_cntrs = e1000_clear_hw_cntrs_82571,
.get_bus_info = e1000e_get_bus_info_pcie,
+ .set_lan_id = e1000_set_lan_id_multi_port_pcie,
/* .get_link_up_info: media type dependent */
/* .led_on: mac type dependent */
.led_off = e1000e_led_off_generic,
- .update_mc_addr_list = e1000_update_mc_addr_list_82571,
+ .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
.write_vfta = e1000_write_vfta_generic,
.clear_vfta = e1000_clear_vfta_82571,
.reset_hw = e1000_reset_hw_82571,
@@ -1704,6 +1713,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
.setup_link = e1000_setup_link_82571,
/* .setup_physical_interface: media type dependent */
.setup_led = e1000e_setup_led_generic,
+ .read_mac_addr = e1000_read_mac_addr_82571,
};
static struct e1000_phy_operations e82_phy_ops_igp = {
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index e02e38221ed4..db05ec355749 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -460,6 +460,8 @@
*/
#define E1000_RAR_ENTRIES 15
#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
+#define E1000_RAL_MAC_ADDR_LEN 4
+#define E1000_RAH_MAC_ADDR_LEN 2
/* Error Codes */
#define E1000_ERR_NVM 1
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index cebbd9079d53..c2ec095d2163 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -421,6 +421,7 @@ struct e1000_info {
/* CRC Stripping defines */
#define FLAG2_CRC_STRIPPING (1 << 0)
#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
+#define FLAG2_IS_DISCARDING (1 << 2)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -458,7 +459,7 @@ extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_update_stats(struct e1000_adapter *adapter);
-extern bool e1000_has_link(struct e1000_adapter *adapter);
+extern bool e1000e_has_link(struct e1000_adapter *adapter);
extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
@@ -502,6 +503,8 @@ extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
+extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+extern void e1000_set_lan_id_single_port(struct e1000_hw *hw);
extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
@@ -516,9 +519,7 @@ extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
u8 *mc_addr_list,
- u32 mc_addr_count,
- u32 rar_used_count,
- u32 rar_count);
+ u32 mc_addr_count);
extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
@@ -529,6 +530,7 @@ extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
extern s32 e1000e_blink_led(struct e1000_hw *hw);
extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
extern void e1000e_reset_adaptive(struct e1000_hw *hw);
extern void e1000e_update_adaptive(struct e1000_hw *hw);
@@ -582,7 +584,6 @@ extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
u16 data);
-extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
@@ -629,7 +630,15 @@ extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16
extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
extern void e1000e_release_nvm(struct e1000_hw *hw);
extern void e1000e_reload_nvm(struct e1000_hw *hw);
-extern s32 e1000e_read_mac_addr(struct e1000_hw *hw);
+extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
+
+static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.read_mac_addr)
+ return hw->mac.ops.read_mac_addr(hw);
+
+ return e1000_read_mac_addr_generic(hw);
+}
static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
{
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 3028f23da891..27d21589a69a 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -224,6 +224,8 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
? true : false;
+ /* Adaptive IFS not supported */
+ mac->adaptive_ifs = false;
/* check for link */
switch (hw->phy.media_type) {
@@ -244,6 +246,9 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
break;
}
+ /* set lan id for port to determine which phy lock to use */
+ hw->mac.ops.set_lan_id(hw);
+
return 0;
}
@@ -812,7 +817,9 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
ew32(IMC, 0xffffffff);
icr = er32(ICR);
- return 0;
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+ return ret_val;
}
/**
@@ -1338,6 +1345,29 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
}
/**
+ * e1000_read_mac_addr_80003es2lan - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ /*
+ * If there's an alternate MAC address, place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
* @hw: pointer to the HW structure
*
@@ -1401,12 +1431,14 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
}
static struct e1000_mac_operations es2_mac_ops = {
+ .read_mac_addr = e1000_read_mac_addr_80003es2lan,
.id_led_init = e1000e_id_led_init,
.check_mng_mode = e1000e_check_mng_mode_generic,
/* check_for_link dependent on media type */
.cleanup_led = e1000e_cleanup_led_generic,
.clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan,
.get_bus_info = e1000e_get_bus_info_pcie,
+ .set_lan_id = e1000_set_lan_id_multi_port_pcie,
.get_link_up_info = e1000_get_link_up_info_80003es2lan,
.led_on = e1000e_led_on_generic,
.led_off = e1000e_led_off_generic,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 0aa50c229c79..b33e3cbe9ab0 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -202,7 +202,7 @@ static u32 e1000_get_link(struct net_device *netdev)
if (!netif_carrier_ok(netdev))
mac->get_link_status = 1;
- return e1000_has_link(adapter);
+ return e1000e_has_link(adapter);
}
static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 2784cf44a6f3..8bdcd5f24eff 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -389,6 +389,9 @@ enum e1e_registers {
#define E1000_FUNC_1 1
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
+
enum e1000_mac_type {
e1000_82571,
e1000_82572,
@@ -746,16 +749,18 @@ struct e1000_mac_operations {
void (*clear_hw_cntrs)(struct e1000_hw *);
void (*clear_vfta)(struct e1000_hw *);
s32 (*get_bus_info)(struct e1000_hw *);
+ void (*set_lan_id)(struct e1000_hw *);
s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
s32 (*led_on)(struct e1000_hw *);
s32 (*led_off)(struct e1000_hw *);
- void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32);
+ void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
s32 (*reset_hw)(struct e1000_hw *);
s32 (*init_hw)(struct e1000_hw *);
s32 (*setup_link)(struct e1000_hw *);
s32 (*setup_physical_interface)(struct e1000_hw *);
s32 (*setup_led)(struct e1000_hw *);
void (*write_vfta)(struct e1000_hw *, u32, u32);
+ s32 (*read_mac_addr)(struct e1000_hw *);
};
/* Function pointers for the PHY. */
@@ -814,10 +819,15 @@ struct e1000_mac_info {
u16 ifs_ratio;
u16 ifs_step_size;
u16 mta_reg_count;
+
+ /* Maximum size of the MTA register table in all supported adapters */
+ #define MAX_MTA_REG 128
+ u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
u8 forced_speed_duplex;
+ bool adaptive_ifs;
bool arc_subsystem_valid;
bool autoneg;
bool autoneg_failed;
@@ -896,7 +906,6 @@ struct e1000_fc_info {
struct e1000_dev_spec_82571 {
bool laa_is_present;
- bool alt_mac_addr_is_present;
u32 smb_counter;
};
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 9b09246af064..54d03a0ce3ce 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -138,6 +138,10 @@
#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
+/* KMRN Mode Control */
+#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
+#define HV_KMRN_MDIO_SLOW 0x0400
+
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
@@ -219,6 +223,7 @@ static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
@@ -270,7 +275,21 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
phy->id = e1000_phy_unknown;
- e1000e_get_phy_id(hw);
+ ret_val = e1000e_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+ if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) {
+ /*
+ * In case the PHY needs to be in mdio slow mode (eg. 82577),
+ * set slow mode and try to get the PHY id again.
+ */
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ goto out;
+ ret_val = e1000e_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+ }
phy->type = e1000e_get_phy_type_from_id(phy->id);
switch (phy->type) {
@@ -292,6 +311,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
break;
}
+out:
return ret_val;
}
@@ -454,6 +474,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
mac->rar_entry_count--;
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid = true;
+ /* Adaptive IFS supported */
+ mac->adaptive_ifs = true;
/* LED operations */
switch (mac->type) {
@@ -1074,16 +1096,44 @@ out:
/**
+ * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 data;
+
+ ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= HV_KMRN_MDIO_SLOW;
+
+ ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
+
+ return ret_val;
+}
+
+/**
* e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
* done after every PHY reset.
**/
static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
s32 ret_val = 0;
+ u16 phy_data;
if (hw->mac.type != e1000_pchlan)
return ret_val;
+ /* Set MDIO slow mode before any other MDIO access */
+ if (hw->phy.type == e1000_phy_82577) {
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ goto out;
+ }
+
if (((hw->phy.type == e1000_phy_82577) &&
((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
@@ -1116,16 +1166,32 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
hw->phy.addr = 1;
ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+ hw->phy.ops.release(hw);
if (ret_val)
goto out;
- hw->phy.ops.release(hw);
/*
* Configure the K1 Si workaround during phy reset assuming there is
* link so that it disables K1 if link is in 1Gbps.
*/
ret_val = e1000_k1_gig_workaround_hv(hw, true);
+ if (ret_val)
+ goto out;
+ /* Workaround for link disconnects on a busy hub in half duplex */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.read_reg_locked(hw,
+ PHY_REG(BM_PORT_CTRL_PAGE, 17),
+ &phy_data);
+ if (ret_val)
+ goto release;
+ ret_val = hw->phy.ops.write_reg_locked(hw,
+ PHY_REG(BM_PORT_CTRL_PAGE, 17),
+ phy_data & 0x00FF);
+release:
+ hw->phy.ops.release(hw);
out:
return ret_val;
}
@@ -1182,6 +1248,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
/* Allow time for h/w to get to a quiescent state after reset */
mdelay(10);
+ /* Perform any necessary post-reset workarounds */
if (hw->mac.type == e1000_pchlan) {
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
if (ret_val)
@@ -2482,6 +2549,10 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
if (!ret_val)
e1000_release_swflag_ich8lan(hw);
+ /* Perform any necessary post-reset workarounds */
+ if (hw->mac.type == e1000_pchlan)
+ ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+
if (ctrl & E1000_CTRL_PHY_RST)
ret_val = hw->phy.ops.get_cfg_done(hw);
@@ -2526,9 +2597,6 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
kab |= E1000_KABGTXD_BGSQLBIAS;
ew32(KABGTXD, kab);
- if (hw->mac.type == e1000_pchlan)
- ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
-
out:
return ret_val;
}
@@ -3300,6 +3368,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
/* cleanup_led dependent on mac type */
.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
.get_bus_info = e1000_get_bus_info_ich8lan,
+ .set_lan_id = e1000_set_lan_id_single_port,
.get_link_up_info = e1000_get_link_up_info_ich8lan,
/* led_on dependent on mac type */
/* led_off dependent on mac type */
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index a86c17548c1e..2425ed11d5cc 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -51,10 +51,10 @@ enum e1000_mng_mode {
**/
s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
{
+ struct e1000_mac_info *mac = &hw->mac;
struct e1000_bus_info *bus = &hw->bus;
struct e1000_adapter *adapter = hw->adapter;
- u32 status;
- u16 pcie_link_status, pci_header_type, cap_offset;
+ u16 pcie_link_status, cap_offset;
cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
if (!cap_offset) {
@@ -68,20 +68,46 @@ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
PCIE_LINK_WIDTH_SHIFT);
}
- pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER,
- &pci_header_type);
- if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
- status = er32(STATUS);
- bus->func = (status & E1000_STATUS_FUNC_MASK)
- >> E1000_STATUS_FUNC_SHIFT;
- } else {
- bus->func = 0;
- }
+ mac->ops.set_lan_id(hw);
return 0;
}
/**
+ * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ *
+ * @hw: pointer to the HW structure
+ *
+ * Determines the LAN function id by reading memory-mapped registers
+ * and swaps the port value if requested.
+ **/
+void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+ u32 reg;
+
+ /*
+ * The status register reports the correct function number
+ * for the device regardless of function swap state.
+ */
+ reg = er32(STATUS);
+ bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+}
+
+/**
+ * e1000_set_lan_id_single_port - Set LAN id for a single port device
+ * @hw: pointer to the HW structure
+ *
+ * Sets the LAN function id to zero for a single port device.
+ **/
+void e1000_set_lan_id_single_port(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+
+ bus->func = 0;
+}
+
+/**
* e1000_clear_vfta_generic - Clear VLAN filter table
* @hw: pointer to the HW structure
*
@@ -125,6 +151,7 @@ void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
{
u32 i;
+ u8 mac_addr[ETH_ALEN] = {0};
/* Setup the receive address */
e_dbg("Programming MAC Address into RAR[0]\n");
@@ -133,12 +160,70 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
/* Zero out the other (rar_entry_count - 1) receive addresses */
e_dbg("Clearing RAR[1-%u]\n", rar_count-1);
- for (i = 1; i < rar_count; i++) {
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
- e1e_flush();
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
- e1e_flush();
+ for (i = 1; i < rar_count; i++)
+ e1000e_rar_set(hw, mac_addr, i);
+}
+
+/**
+ * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
+ * @hw: pointer to the HW structure
+ *
+ * Checks the nvm for an alternate MAC address. An alternate MAC address
+ * can be set up by pre-boot software and must be treated like a permanent
+ * address and must override the actual permanent MAC address. If an
+ * alternate MAC address is found it is programmed into RAR0, replacing
+ * the permanent address that was installed into RAR0 by the Si on reset.
+ * This function will return SUCCESS unless it encounters an error while
+ * reading the EEPROM.
+ **/
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
+{
+ u32 i;
+ s32 ret_val = 0;
+ u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+ u8 alt_mac_addr[ETH_ALEN];
+
+ ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+ &nvm_alt_mac_addr_offset);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ if (nvm_alt_mac_addr_offset == 0xFFFF) {
+ /* There is no Alternate MAC Address */
+ goto out;
+ }
+
+ if (hw->bus.func == E1000_FUNC_1)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+ for (i = 0; i < ETH_ALEN; i += 2) {
+ offset = nvm_alt_mac_addr_offset + (i >> 1);
+ ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+ alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+ }
+
+ /* if multicast bit is set, the alternate address will not be used */
+ if (alt_mac_addr[0] & 0x01) {
+ e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
+ goto out;
}
+
+ /*
+ * We have a valid alternate MAC address, and we want to treat it the
+ * same as the normal permanent MAC address stored by the HW into the
+ * RAR. Do this by mapping this address into RAR0.
+ */
+ e1000e_rar_set(hw, alt_mac_addr, 0);
+
+out:
+ return ret_val;
}
/**
@@ -164,10 +249,19 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
- rar_high |= E1000_RAH_AV;
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= E1000_RAH_AV;
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
+ /*
+ * Some bridges will combine consecutive 32-bit writes into
+ * a single burst write, which will malfunction on some parts.
+ * The flushes avoid this.
+ */
+ ew32(RAL(index), rar_low);
+ e1e_flush();
+ ew32(RAH(index), rar_high);
+ e1e_flush();
}
/**
@@ -246,62 +340,34 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
- * @rar_used_count: the first RAR register free to program
- * @rar_count: total number of supported Receive Address Registers
*
- * Updates the Receive Address Registers and Multicast Table Array.
+ * Updates entire Multicast Table Array.
* The caller must have a packed mc_addr_list of multicast addresses.
- * The parameter rar_count will usually be hw->mac.rar_entry_count
- * unless there are workarounds that change this.
**/
void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count,
- u32 rar_used_count, u32 rar_count)
+ u8 *mc_addr_list, u32 mc_addr_count)
{
- u32 i;
- u32 *mcarray = kzalloc(hw->mac.mta_reg_count * sizeof(u32), GFP_ATOMIC);
+ u32 hash_value, hash_bit, hash_reg;
+ int i;
- if (!mcarray) {
- printk(KERN_ERR "multicast array memory allocation failed\n");
- return;
- }
-
- /*
- * Load the first set of multicast addresses into the exact
- * filters (RAR). If there are not enough to fill the RAR
- * array, clear the filters.
- */
- for (i = rar_used_count; i < rar_count; i++) {
- if (mc_addr_count) {
- e1000e_rar_set(hw, mc_addr_list, i);
- mc_addr_count--;
- mc_addr_list += ETH_ALEN;
- } else {
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
- e1e_flush();
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
- e1e_flush();
- }
- }
+ /* clear mta_shadow */
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
- /* Load any remaining multicast addresses into the hash table. */
- for (; mc_addr_count > 0; mc_addr_count--) {
- u32 hash_value, hash_reg, hash_bit, mta;
+ /* update mta_shadow from mc_addr_list */
+ for (i = 0; (u32) i < mc_addr_count; i++) {
hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
- e_dbg("Hash value = 0x%03X\n", hash_value);
+
hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
hash_bit = hash_value & 0x1F;
- mta = (1 << hash_bit);
- mcarray[hash_reg] |= mta;
- mc_addr_list += ETH_ALEN;
- }
- /* write the hash table completely */
- for (i = 0; i < hw->mac.mta_reg_count; i++)
- E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, mcarray[i]);
+ hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ mc_addr_list += (ETH_ALEN);
+ }
+ /* replace the entire MTA table */
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
e1e_flush();
- kfree(mcarray);
}
/**
@@ -1609,6 +1675,11 @@ void e1000e_reset_adaptive(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
+ if (!mac->adaptive_ifs) {
+ e_dbg("Not in Adaptive IFS mode!\n");
+ goto out;
+ }
+
mac->current_ifs_val = 0;
mac->ifs_min_val = IFS_MIN;
mac->ifs_max_val = IFS_MAX;
@@ -1617,6 +1688,8 @@ void e1000e_reset_adaptive(struct e1000_hw *hw)
mac->in_ifs_mode = false;
ew32(AIT, 0);
+out:
+ return;
}
/**
@@ -1630,6 +1703,11 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
+ if (!mac->adaptive_ifs) {
+ e_dbg("Not in Adaptive IFS mode!\n");
+ goto out;
+ }
+
if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
if (mac->tx_packet_delta > MIN_NUM_XMITS) {
mac->in_ifs_mode = true;
@@ -1650,6 +1728,8 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
ew32(AIT, 0);
}
}
+out:
+ return;
}
/**
@@ -2052,67 +2132,27 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
}
/**
- * e1000e_read_mac_addr - Read device MAC address
+ * e1000_read_mac_addr_generic - Read device MAC address
* @hw: pointer to the HW structure
*
* Reads the device MAC address from the EEPROM and stores the value.
* Since devices with two ports use the same EEPROM, we increment the
* last bit in the MAC address for the second port.
**/
-s32 e1000e_read_mac_addr(struct e1000_hw *hw)
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
{
- s32 ret_val;
- u16 offset, nvm_data, i;
- u16 mac_addr_offset = 0;
-
- if (hw->mac.type == e1000_82571) {
- /* Check for an alternate MAC address. An alternate MAC
- * address can be setup by pre-boot software and must be
- * treated like a permanent address and must override the
- * actual permanent MAC address.*/
- ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
- &mac_addr_offset);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- return ret_val;
- }
- if (mac_addr_offset == 0xFFFF)
- mac_addr_offset = 0;
-
- if (mac_addr_offset) {
- if (hw->bus.func == E1000_FUNC_1)
- mac_addr_offset += ETH_ALEN/sizeof(u16);
-
- /* make sure we have a valid mac address here
- * before using it */
- ret_val = e1000_read_nvm(hw, mac_addr_offset, 1,
- &nvm_data);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- return ret_val;
- }
- if (nvm_data & 0x0001)
- mac_addr_offset = 0;
- }
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
- if (mac_addr_offset)
- hw->dev_spec.e82571.alt_mac_addr_is_present = 1;
- }
+ rar_high = er32(RAH(0));
+ rar_low = er32(RAL(0));
- for (i = 0; i < ETH_ALEN; i += 2) {
- offset = mac_addr_offset + (i >> 1);
- ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- return ret_val;
- }
- hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
- hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
- }
+ for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
- /* Flip last bit of mac address if we're on second port */
- if (!mac_addr_offset && hw->bus.func == E1000_FUNC_1)
- hw->mac.perm_addr[5] ^= 1;
+ for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
for (i = 0; i < ETH_ALEN; i++)
hw->mac.addr[i] = hw->mac.perm_addr[i];
@@ -2287,10 +2327,12 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
s32 ret_val, hdr_csum, csum;
u8 i, len;
+ hw->mac.tx_pkt_filtering = true;
+
/* No manageability, no filtering */
if (!e1000e_check_mng_mode(hw)) {
hw->mac.tx_pkt_filtering = false;
- return 0;
+ goto out;
}
/*
@@ -2298,9 +2340,9 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
* reason, disable filtering.
*/
ret_val = e1000_mng_enable_host_if(hw);
- if (ret_val != 0) {
+ if (ret_val) {
hw->mac.tx_pkt_filtering = false;
- return ret_val;
+ goto out;
}
/* Read in the header. Length and offset are in dwords. */
@@ -2319,17 +2361,17 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
*/
if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
hw->mac.tx_pkt_filtering = true;
- return 1;
+ goto out;
}
/* Cookie area is valid, make the final check for filtering. */
if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
hw->mac.tx_pkt_filtering = false;
- return 0;
+ goto out;
}
- hw->mac.tx_pkt_filtering = true;
- return 1;
+out:
+ return hw->mac.tx_pkt_filtering;
}
/**
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 762b697ce731..88d54d3efcef 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -450,13 +450,23 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
length = le16_to_cpu(rx_desc->length);
- /* !EOP means multiple descriptors were used to store a single
- * packet, also make sure the frame isn't just CRC only */
- if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
+ /*
+ * !EOP means multiple descriptors were used to store a single
+ * packet, if that's the case we need to toss it. In fact, we
+ * need to toss every packet with the EOP bit clear and the
+ * next frame that _does_ have the EOP bit set, as it is by
+ * definition only a frame fragment
+ */
+ if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+ adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+ if (adapter->flags2 & FLAG2_IS_DISCARDING) {
/* All receives must fit into a single buffer */
e_dbg("Receive packet consumed multiple buffers\n");
/* recycle */
buffer_info->skb = skb;
+ if (status & E1000_RXD_STAT_EOP)
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
goto next_desc;
}
@@ -745,10 +755,16 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
PCI_DMA_FROMDEVICE);
buffer_info->dma = 0;
- if (!(staterr & E1000_RXD_STAT_EOP)) {
+ /* see !EOP comment in other rx routine */
+ if (!(staterr & E1000_RXD_STAT_EOP))
+ adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+ if (adapter->flags2 & FLAG2_IS_DISCARDING) {
e_dbg("Packet Split buffers didn't pick up the full "
"packet\n");
dev_kfree_skb_irq(skb);
+ if (staterr & E1000_RXD_STAT_EOP)
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
goto next_desc;
}
@@ -1118,6 +1134,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
writel(0, adapter->hw.hw_addr + rx_ring->head);
writel(0, adapter->hw.hw_addr + rx_ring->tail);
@@ -2333,18 +2350,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
rctl &= ~E1000_RCTL_SZ_4096;
rctl |= E1000_RCTL_BSEX;
switch (adapter->rx_buffer_len) {
- case 256:
- rctl |= E1000_RCTL_SZ_256;
- rctl &= ~E1000_RCTL_BSEX;
- break;
- case 512:
- rctl |= E1000_RCTL_SZ_512;
- rctl &= ~E1000_RCTL_BSEX;
- break;
- case 1024:
- rctl |= E1000_RCTL_SZ_1024;
- rctl &= ~E1000_RCTL_BSEX;
- break;
case 2048:
default:
rctl |= E1000_RCTL_SZ_2048;
@@ -2536,22 +2541,14 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
- * @rar_used_count: the first RAR register free to program
- * @rar_count: total number of supported Receive Address Registers
*
- * Updates the Receive Address Registers and Multicast Table Array.
+ * Updates the Multicast Table Array.
* The caller must have a packed mc_addr_list of multicast addresses.
- * The parameter rar_count will usually be hw->mac.rar_entry_count
- * unless there are workarounds that change this. Currently no func pointer
- * exists and all implementations are handled in the generic version of this
- * function.
**/
static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count, u32 rar_used_count,
- u32 rar_count)
+ u32 mc_addr_count)
{
- hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
- rar_used_count, rar_count);
+ hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
}
/**
@@ -2567,7 +2564,6 @@ static void e1000_set_multi(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct e1000_mac_info *mac = &hw->mac;
struct dev_mc_list *mc_ptr;
u8 *mta_list;
u32 rctl;
@@ -2593,31 +2589,25 @@ static void e1000_set_multi(struct net_device *netdev)
ew32(RCTL, rctl);
- if (netdev->mc_count) {
- mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC);
+ if (!netdev_mc_empty(netdev)) {
+ mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
if (!mta_list)
return;
/* prepare a packed array of only addresses. */
- mc_ptr = netdev->mc_list;
-
- for (i = 0; i < netdev->mc_count; i++) {
- if (!mc_ptr)
- break;
- memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
- ETH_ALEN);
- mc_ptr = mc_ptr->next;
- }
+ i = 0;
+ netdev_for_each_mc_addr(mc_ptr, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN),
+ mc_ptr->dmi_addr, ETH_ALEN);
- e1000_update_mc_addr_list(hw, mta_list, i, 1,
- mac->rar_entry_count);
+ e1000_update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
} else {
/*
* if we're called from probe, we might not have
* anything to do here, so clear out the list
*/
- e1000_update_mc_addr_list(hw, NULL, 0, 1, mac->rar_entry_count);
+ e1000_update_mc_addr_list(hw, NULL, 0);
}
}
@@ -3315,24 +3305,24 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82577)) {
e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
- e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
- adapter->stats.scc += phy_data;
+ if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data))
+ adapter->stats.scc += phy_data;
e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
- e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
- adapter->stats.ecol += phy_data;
+ if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data))
+ adapter->stats.ecol += phy_data;
e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
- e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
- adapter->stats.mcc += phy_data;
+ if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data))
+ adapter->stats.mcc += phy_data;
e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
- e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
- adapter->stats.latecol += phy_data;
+ if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data))
+ adapter->stats.latecol += phy_data;
e1e_rphy(hw, HV_DC_UPPER, &phy_data);
- e1e_rphy(hw, HV_DC_LOWER, &phy_data);
- adapter->stats.dc += phy_data;
+ if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data))
+ adapter->stats.dc += phy_data;
} else {
adapter->stats.scc += er32(SCC);
adapter->stats.ecol += er32(ECOL);
@@ -3360,8 +3350,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82577)) {
e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
- e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
- hw->mac.collision_delta = phy_data;
+ if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data))
+ hw->mac.collision_delta = phy_data;
} else {
hw->mac.collision_delta = er32(COLC);
}
@@ -3372,8 +3362,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82577)) {
e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
- e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
- adapter->stats.tncrs += phy_data;
+ if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data))
+ adapter->stats.tncrs += phy_data;
} else {
if ((hw->mac.type != e1000_82574) &&
(hw->mac.type != e1000_82583))
@@ -3477,7 +3467,7 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
}
-bool e1000_has_link(struct e1000_adapter *adapter)
+bool e1000e_has_link(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
bool link_active = 0;
@@ -3558,7 +3548,7 @@ static void e1000_watchdog_task(struct work_struct *work)
u32 link, tctl;
int tx_pending = 0;
- link = e1000_has_link(adapter);
+ link = e1000e_has_link(adapter);
if ((netif_carrier_ok(netdev)) && link) {
e1000e_enable_receives(adapter);
goto link_up;
@@ -3781,7 +3771,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
0, IPPROTO_TCP, 0);
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
- } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+ } else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
@@ -3962,13 +3952,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
dma_error:
dev_err(&pdev->dev, "TX DMA map failed\n");
buffer_info->dma = 0;
- count--;
-
- while (count >= 0) {
+ if (count)
count--;
- i--;
- if (i < 0)
+
+ while (count--) {
+ if (i == 0)
i += tx_ring->count;
+ i--;
buffer_info = &tx_ring->buffer_info[i];
e1000_put_txbuf(adapter, buffer_info);
}
@@ -4317,13 +4307,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* fragmented skbs
*/
- if (max_frame <= 256)
- adapter->rx_buffer_len = 256;
- else if (max_frame <= 512)
- adapter->rx_buffer_len = 512;
- else if (max_frame <= 1024)
- adapter->rx_buffer_len = 1024;
- else if (max_frame <= 2048)
+ if (max_frame <= 2048)
adapter->rx_buffer_len = 2048;
else
adapter->rx_buffer_len = 4096;
@@ -4674,6 +4658,7 @@ static int e1000_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
+ pci_save_state(pdev);
e1000e_disable_l1aspm(pdev);
err = pci_enable_device_mem(pdev);
@@ -4825,6 +4810,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
+ pci_save_state(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -5133,7 +5119,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
e1000_eeprom_checks(adapter);
- /* copy the MAC address out of the NVM */
+ /* copy the MAC address */
if (e1000e_read_mac_addr(&adapter->hw))
e_err("NVM Read Error while reading MAC address\n");
@@ -5325,7 +5311,7 @@ static struct pci_error_handlers e1000_err_handler = {
.resume = e1000_io_resume,
};
-static struct pci_device_id e1000_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 55a2c0acfee7..7f3ceb9dad6a 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -152,32 +152,9 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
goto out;
- /*
- * If the PHY ID is still unknown, we may have an 82577
- * without link. We will try again after setting Slow MDIC
- * mode. No harm in trying again in this case since the PHY
- * ID is unknown at this point anyway.
- */
- ret_val = phy->ops.acquire(hw);
- if (ret_val)
- goto out;
- ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
- if (ret_val)
- goto out;
- phy->ops.release(hw);
-
retry_count++;
}
out:
- /* Revert to MDIO fast mode, if applicable */
- if (retry_count) {
- ret_val = phy->ops.acquire(hw);
- if (ret_val)
- return ret_val;
- ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
- phy->ops.release(hw);
- }
-
return ret_val;
}
@@ -2791,38 +2768,6 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
}
/**
- * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
- * @hw: pointer to the HW structure
- * @slow: true for slow mode, false for normal mode
- *
- * Assumes semaphore already acquired.
- **/
-s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
-{
- s32 ret_val = 0;
- u16 data = 0;
-
- /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
- hw->phy.addr = 1;
- ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
- (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
- if (ret_val)
- goto out;
-
- ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1,
- (0x2180 | (slow << 10)));
- if (ret_val)
- goto out;
-
- /* dummy read when reverting to fast mode - throw away result */
- if (!slow)
- ret_val = e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
-
-out:
- return ret_val;
-}
-
-/**
* __e1000_read_phy_reg_hv - Read HV PHY register
* @hw: pointer to the HW structure
* @offset: register offset to be read
@@ -2839,7 +2784,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
s32 ret_val;
u16 page = BM_PHY_REG_PAGE(offset);
u16 reg = BM_PHY_REG_NUM(offset);
- bool in_slow_mode = false;
if (!locked) {
ret_val = hw->phy.ops.acquire(hw);
@@ -2847,16 +2791,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
return ret_val;
}
- /* Workaround failure in MDIO access while cable is disconnected */
- if ((hw->phy.type == e1000_phy_82577) &&
- !(er32(STATUS) & E1000_STATUS_LU)) {
- ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
- if (ret_val)
- goto out;
-
- in_slow_mode = true;
- }
-
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
@@ -2893,10 +2827,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
data);
out:
- /* Revert to MDIO fast mode, if applicable */
- if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
- ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
-
if (!locked)
hw->phy.ops.release(hw);
@@ -2948,7 +2878,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
s32 ret_val;
u16 page = BM_PHY_REG_PAGE(offset);
u16 reg = BM_PHY_REG_NUM(offset);
- bool in_slow_mode = false;
if (!locked) {
ret_val = hw->phy.ops.acquire(hw);
@@ -2956,16 +2885,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
return ret_val;
}
- /* Workaround failure in MDIO access while cable is disconnected */
- if ((hw->phy.type == e1000_phy_82577) &&
- !(er32(STATUS) & E1000_STATUS_LU)) {
- ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
- if (ret_val)
- goto out;
-
- in_slow_mode = true;
- }
-
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
@@ -3019,10 +2938,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
data);
out:
- /* Revert to MDIO fast mode, if applicable */
- if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
- ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
-
if (!locked)
hw->phy.ops.release(hw);
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index d87935ad1130..c81bc4b1816f 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -51,7 +51,7 @@
#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
/* Supported devices */
-static struct pci_device_id enic_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
{ 0, } /* end of table */
};
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 41494f7b2ec8..1f8b11449fad 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -167,7 +167,7 @@ static const struct epic_chip_info pci_id_tbl[] = {
};
-static struct pci_device_id epic_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = {
{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index bd1db92aec1b..f9d5ca078743 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -904,7 +904,7 @@ static int ethoc_probe(struct platform_device *pdev)
}
mmio = devm_request_mem_region(&pdev->dev, res->start,
- res->end - res->start + 1, res->name);
+ resource_size(res), res->name);
if (!mmio) {
dev_err(&pdev->dev, "cannot request I/O memory space\n");
ret = -ENXIO;
@@ -917,7 +917,7 @@ static int ethoc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
mem = devm_request_mem_region(&pdev->dev, res->start,
- res->end - res->start + 1, res->name);
+ resource_size(res), res->name);
if (!mem) {
dev_err(&pdev->dev, "cannot request memory space\n");
ret = -ENXIO;
@@ -945,7 +945,7 @@ static int ethoc_probe(struct platform_device *pdev)
priv->dma_alloc = 0;
priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
- mmio->end - mmio->start + 1);
+ resource_size(mmio));
if (!priv->iobase) {
dev_err(&pdev->dev, "cannot remap I/O memory space\n");
ret = -ENXIO;
@@ -954,7 +954,7 @@ static int ethoc_probe(struct platform_device *pdev)
if (netdev->mem_end) {
priv->membase = devm_ioremap_nocache(&pdev->dev,
- netdev->mem_start, mem->end - mem->start + 1);
+ netdev->mem_start, resource_size(mem));
if (!priv->membase) {
dev_err(&pdev->dev, "cannot remap memory space\n");
ret = -ENXIO;
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index dac4e595589e..e6a98129d787 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1941,7 +1941,7 @@ static int netdev_close(struct net_device *dev)
return 0;
}
-static struct pci_device_id fealnx_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(fealnx_pci_tbl) = {
{0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
{0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 3c340489804a..3eb713b014f9 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -6198,7 +6198,7 @@ static void nv_shutdown(struct pci_dev *pdev)
#define nv_resume NULL
#endif /* CONFIG_PM */
-static struct pci_device_id pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
{ /* nForce Ethernet Controller */
PCI_DEVICE(0x10DE, 0x01C3),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index 25fabb3eedc5..d5160edf2fcf 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -46,6 +46,11 @@
#include "gianfar.h"
#include "fsl_pq_mdio.h"
+struct fsl_pq_mdio_priv {
+ void __iomem *map;
+ struct fsl_pq_mdio __iomem *regs;
+};
+
/*
* Write value to the PHY at mii_id at register regnum,
* on the bus attached to the local interface, which may be different from the
@@ -105,7 +110,9 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
{
- return (void __iomem __force *)bus->priv;
+ struct fsl_pq_mdio_priv *priv = bus->priv;
+
+ return priv->regs;
}
/*
@@ -266,6 +273,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
{
struct device_node *np = ofdev->node;
struct device_node *tbi;
+ struct fsl_pq_mdio_priv *priv;
struct fsl_pq_mdio __iomem *regs = NULL;
void __iomem *map;
u32 __iomem *tbipa;
@@ -274,14 +282,19 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
u64 addr = 0, size = 0;
int err = 0;
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
new_bus = mdiobus_alloc();
if (NULL == new_bus)
- return -ENOMEM;
+ goto err_free_priv;
new_bus->name = "Freescale PowerQUICC MII Bus",
new_bus->read = &fsl_pq_mdio_read,
new_bus->write = &fsl_pq_mdio_write,
new_bus->reset = &fsl_pq_mdio_reset,
+ new_bus->priv = priv;
fsl_pq_mdio_bus_name(new_bus->id, np);
/* Set the PHY base address */
@@ -291,6 +304,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
err = -ENOMEM;
goto err_free_bus;
}
+ priv->map = map;
if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
of_device_is_compatible(np, "fsl,gianfar-tbi") ||
@@ -298,8 +312,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
of_device_is_compatible(np, "ucc_geth_phy"))
map -= offsetof(struct fsl_pq_mdio, miimcfg);
regs = map;
-
- new_bus->priv = (void __force *)regs;
+ priv->regs = regs;
new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
@@ -392,10 +405,11 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
err_free_irqs:
kfree(new_bus->irq);
err_unmap_regs:
- iounmap(regs);
+ iounmap(priv->map);
err_free_bus:
kfree(new_bus);
-
+err_free_priv:
+ kfree(priv);
return err;
}
@@ -404,14 +418,16 @@ static int fsl_pq_mdio_remove(struct of_device *ofdev)
{
struct device *device = &ofdev->dev;
struct mii_bus *bus = dev_get_drvdata(device);
+ struct fsl_pq_mdio_priv *priv = bus->priv;
mdiobus_unregister(bus);
dev_set_drvdata(device, NULL);
- iounmap(fsl_pq_mdio_get_regs(bus));
+ iounmap(priv->map);
bus->priv = NULL;
mdiobus_free(bus);
+ kfree(priv);
return 0;
}
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index e0620d084644..8bd3c9f17532 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -143,7 +143,6 @@ void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
@@ -455,7 +454,6 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_set_multicast_list = gfar_set_multi,
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
- .ndo_select_queue = gfar_select_queue,
.ndo_get_stats = gfar_get_stats,
.ndo_vlan_rx_register = gfar_vlan_rx_register,
.ndo_set_mac_address = eth_mac_addr,
@@ -506,10 +504,6 @@ static inline int gfar_uses_fcb(struct gfar_private *priv)
return priv->vlgrp || priv->rx_csum_enable;
}
-u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
- return skb_get_queue_mapping(skb);
-}
static void free_tx_pointers(struct gfar_private *priv)
{
int i = 0;
@@ -2470,10 +2464,11 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
fcb = (struct rxfcb *)skb->data;
/* Remove the FCB from the skb */
- skb_set_queue_mapping(skb, fcb->rq);
/* Remove the padded bytes, if there are any */
- if (amount_pull)
+ if (amount_pull) {
+ skb_record_rx_queue(skb, fcb->rq);
skb_pull(skb, amount_pull);
+ }
if (priv->rx_csum_enable)
gfar_rx_checksum(skb, fcb);
@@ -2554,7 +2549,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
/* Remove the FCS from the packet length */
skb_put(skb, pkt_len);
rx_queue->stats.rx_bytes += pkt_len;
-
+ skb_record_rx_queue(skb, rx_queue->qindex);
gfar_process_frame(dev, skb, amount_pull);
} else {
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index ea85075a89a2..dd72c5025e6a 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1990,7 +1990,7 @@ static void __devexit hamachi_remove_one (struct pci_dev *pdev)
}
}
-static struct pci_device_id hamachi_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(hamachi_pci_tbl) = {
{ 0x1318, 0x0911, PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }
};
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index ae5f11c8fc13..bdadf3e23c94 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -248,6 +248,7 @@ static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned char *ptr;
struct bpqdev *bpq;
+ struct net_device *orig_dev;
int size;
/*
@@ -282,8 +283,9 @@ static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev)
bpq = netdev_priv(dev);
+ orig_dev = dev;
if ((dev = bpq_get_ether_dev(dev)) == NULL) {
- dev->stats.tx_dropped++;
+ orig_dev->stats.tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 90f890e7c5e1..0c2f2e8b1c47 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -210,7 +210,7 @@ MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
#endif
#ifdef CONFIG_PCI
-static struct pci_device_id hp100_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(hp100_pci_tbl) = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2970A, PCI_ANY_ID, PCI_ANY_ID,},
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 090a6d3af112..052c74091d91 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -87,6 +87,7 @@ History:
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
@@ -988,7 +989,7 @@ static int __devinit ibmlana_init_one(struct device *kdev)
/* copy out MAC address */
- for (z = 0; z < sizeof(dev->dev_addr); z++)
+ for (z = 0; z < ETH_ALEN; z++)
dev->dev_addr[z] = inb(dev->base_addr + MACADDRPROM + z);
/* print config */
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index e8e9e9194a88..c505b50d1fa3 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -1096,9 +1096,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
} else {
/* Set PCS register for forced link */
- reg |= E1000_PCS_LCTL_FSD | /* Force Speed */
- E1000_PCS_LCTL_FORCE_LINK | /* Force Link */
- E1000_PCS_LCTL_FLV_LINK_UP; /* Force link value up */
+ reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
}
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index 5c9d73e9bb8d..3670a66401b8 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -457,15 +457,6 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
- if (ret_val)
- goto out;
-
- /* Set number of link attempts before downshift */
- ret_val = phy->ops.read_reg(hw, I82580_CTRL_REG, &phy_data);
- if (ret_val)
- goto out;
- phy_data &= ~I82580_CTRL_DOWNSHIFT_MASK;
- ret_val = phy->ops.write_reg(hw, I82580_CTRL_REG, phy_data);
out:
return ret_val;
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index ac9d5272650d..f771a6c08777 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1795,7 +1795,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
/* dual port cards only support WoL on port A from now on
* unless it was enabled in the eeprom for port B
* so exclude FUNC_1 ports from having WoL enabled */
- if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1 &&
+ if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
!adapter->eeprom_wol) {
wol->supported = 0;
break;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 78963a0e128d..83cd0d7417d0 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -60,7 +60,7 @@ static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
};
-static struct pci_device_id igb_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
@@ -421,6 +421,8 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
if (tx_queue > IGB_N0_QUEUE)
msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
+ if (!adapter->msix_entries && msix_vector == 0)
+ msixbm |= E1000_EIMS_OTHER;
array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
q_vector->eims_value = msixbm;
break;
@@ -877,7 +879,6 @@ static int igb_request_irq(struct igb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
- struct e1000_hw *hw = &adapter->hw;
int err = 0;
if (adapter->msix_entries) {
@@ -909,20 +910,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
igb_setup_all_tx_resources(adapter);
igb_setup_all_rx_resources(adapter);
} else {
- switch (hw->mac.type) {
- case e1000_82575:
- wr32(E1000_MSIXBM(0),
- (E1000_EICR_RX_QUEUE0 |
- E1000_EICR_TX_QUEUE0 |
- E1000_EIMS_OTHER));
- break;
- case e1000_82580:
- case e1000_82576:
- wr32(E1000_IVAR0, E1000_IVAR_VALID);
- break;
- default:
- break;
- }
+ igb_assign_vector(adapter->q_vector[0], 0);
}
if (adapter->flags & IGB_FLAG_HAS_MSI) {
@@ -1140,6 +1128,8 @@ int igb_up(struct igb_adapter *adapter)
}
if (adapter->msix_entries)
igb_configure_msix(adapter);
+ else
+ igb_assign_vector(adapter->q_vector[0], 0);
/* Clear any pending interrupts. */
rd32(E1000_ICR);
@@ -1306,13 +1296,8 @@ void igb_reset(struct igb_adapter *adapter)
hwm = min(((pba << 10) * 9 / 10),
((pba << 10) - 2 * adapter->max_frame_size));
- if (mac->type < e1000_82576) {
- fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
- fc->low_water = fc->high_water - 8;
- } else {
- fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
- fc->low_water = fc->high_water - 16;
- }
+ fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
+ fc->low_water = fc->high_water - 16;
fc->pause_time = 0xFFFF;
fc->send_xon = 1;
fc->current_mode = fc->requested_mode;
@@ -2910,12 +2895,13 @@ static int igb_write_uc_addr_list(struct net_device *netdev)
int count = 0;
/* return ENOMEM indicating insufficient memory for addresses */
- if (netdev->uc.count > rar_entries)
+ if (netdev_uc_count(netdev) > rar_entries)
return -ENOMEM;
- if (netdev->uc.count && rar_entries) {
+ if (!netdev_uc_empty(netdev) && rar_entries) {
struct netdev_hw_addr *ha;
- list_for_each_entry(ha, &netdev->uc.list, list) {
+
+ netdev_for_each_uc_addr(ha, netdev) {
if (!rar_entries)
break;
igb_rar_set_qsel(adapter, ha->addr,
@@ -3427,7 +3413,7 @@ static inline int igb_tso_adv(struct igb_ring *tx_ring,
iph->daddr, 0,
IPPROTO_TCP,
0);
- } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+ } else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
@@ -3589,6 +3575,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
struct skb_frag_struct *frag;
+ count++;
i++;
if (i == tx_ring->count)
i = 0;
@@ -3610,7 +3597,6 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
if (pci_dma_mapping_error(pdev, buffer_info->dma))
goto dma_error;
- count++;
}
tx_ring->buffer_info[i].skb = skb;
@@ -4110,6 +4096,9 @@ static irqreturn_t igb_msix_other(int irq, void *data)
u32 icr = rd32(E1000_ICR);
/* reading ICR causes bit 31 of EICR to be cleared */
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
if (icr & E1000_ICR_DOUTSYNC) {
/* HW is reporting DMA is out of sync */
adapter->stats.doosync++;
@@ -4733,6 +4722,9 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
igb_write_itr(q_vector);
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
if (icr & E1000_ICR_DOUTSYNC) {
/* HW is reporting DMA is out of sync */
adapter->stats.doosync++;
@@ -4772,6 +4764,9 @@ static irqreturn_t igb_intr(int irq, void *data)
if (!(icr & E1000_ICR_INT_ASSERTED))
return IRQ_NONE;
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
if (icr & E1000_ICR_DOUTSYNC) {
/* HW is reporting DMA is out of sync */
adapter->stats.doosync++;
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 1326232c1d32..23ce07d3de08 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1963,7 +1963,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
iph->daddr, 0,
IPPROTO_TCP,
0);
- } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+ } else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
@@ -2117,6 +2117,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
+ buffer_info->mapped_as_page = false;
buffer_info->dma = pci_map_single(pdev, skb->data, len,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(pdev, buffer_info->dma))
@@ -2126,6 +2127,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
struct skb_frag_struct *frag;
+ count++;
i++;
if (i == tx_ring->count)
i = 0;
@@ -2146,7 +2148,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(pdev, buffer_info->dma))
goto dma_error;
- count++;
}
tx_ring->buffer_info[i].skb = skb;
@@ -2163,14 +2164,14 @@ dma_error:
buffer_info->length = 0;
buffer_info->next_to_watch = 0;
buffer_info->mapped_as_page = false;
- count--;
+ if (count)
+ count--;
/* clear timestamp and dma mappings for remaining portion of packet */
- while (count >= 0) {
- count--;
- i--;
- if (i < 0)
+ while (count--) {
+ if (i == 0)
i += tx_ring->count;
+ i--;
buffer_info = &tx_ring->buffer_info[i];
igbvf_put_txbuf(adapter, buffer_info);
}
@@ -2759,7 +2760,8 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
err = hw->mac.ops.reset_hw(hw);
if (err) {
dev_info(&pdev->dev,
- "PF still in reset state, assigning new address\n");
+ "PF still in reset state, assigning new address."
+ " Is the PF interface up?\n");
random_ether_addr(hw->mac.addr);
} else {
err = hw->mac.ops.read_mac_addr(hw);
@@ -2876,7 +2878,7 @@ static struct pci_error_handlers igbvf_err_handler = {
.resume = igbvf_io_resume,
};
-static struct pci_device_id igbvf_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
{ } /* terminate list */
};
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 8ec15ab8c8c2..81a4c5d30733 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -1383,7 +1383,7 @@ static void __devexit ioc3_remove_one (struct pci_dev *pdev)
*/
}
-static struct pci_device_id ioc3_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ioc3_pci_tbl) = {
{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
{ 0 }
};
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index ba8d246d05a0..49f35e2ed19f 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -92,7 +92,7 @@ static const char *ipg_brand_name[] = {
"D-Link NIC IP1000A"
};
-static struct pci_device_id ipg_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
{ PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
{ PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
{ PCI_VDEVICE(SUNDANCE, 0x1021), 2 },
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index f76384221422..af10e97345ce 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -64,6 +64,16 @@ endchoice
comment "Dongle support"
+config SH_SIR
+ tristate "SuperH SIR on UART"
+ depends on IRDA && SUPERH && \
+ (CPU_SUBTYPE_SH7722 || CPU_SUBTYPE_SH7723 || \
+ CPU_SUBTYPE_SH7724)
+ default n
+ help
+ Say Y here if you want to enable the SIR function on SuperH UART
+ devices.
+
config DONGLE
bool "Serial dongle support"
depends on IRTTY_SIR
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index d82e1e3bd8c8..e030d47e2793 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
# SIR drivers
obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
obj-$(CONFIG_BFIN_SIR) += bfin_sir.o
+obj-$(CONFIG_SH_SIR) += sh_sir.o
# dongle drivers for SIR drivers
obj-$(CONFIG_ESI_DONGLE) += esi-sir.o
obj-$(CONFIG_TEKRAM_DONGLE) += tekram-sir.o
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 2d7b5c1d5572..b7e6625ca75e 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -184,7 +184,7 @@
#define CONFIG0H_DMA_ON_NORX CONFIG0H_DMA_OFF| OBOE_CONFIG0H_ENDMAC
#define CONFIG0H_DMA_ON CONFIG0H_DMA_ON_NORX | OBOE_CONFIG0H_ENRX
-static struct pci_device_id toshoboe_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(toshoboe_pci_tbl) = {
{ PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIR701, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIRD01, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* Terminating entry */
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
new file mode 100644
index 000000000000..d7c983dc91ad
--- /dev/null
+++ b/drivers/net/irda/sh_sir.c
@@ -0,0 +1,823 @@
+/*
+ * SuperH IrDA Driver
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on bfin_sir.c
+ * Copyright 2006-2009 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+#include <asm/clock.h>
+
+#define DRIVER_NAME "sh_sir"
+
+#define RX_PHASE (1 << 0)
+#define TX_PHASE (1 << 1)
+#define TX_COMP_PHASE (1 << 2) /* tx complete */
+#define NONE_PHASE (1 << 31)
+
+#define IRIF_RINTCLR 0x0016 /* DMA rx interrupt source clear */
+#define IRIF_TINTCLR 0x0018 /* DMA tx interrupt source clear */
+#define IRIF_SIR0 0x0020 /* IrDA-SIR10 control */
+#define IRIF_SIR1 0x0022 /* IrDA-SIR10 baudrate error correction */
+#define IRIF_SIR2 0x0024 /* IrDA-SIR10 baudrate count */
+#define IRIF_SIR3 0x0026 /* IrDA-SIR10 status */
+#define IRIF_SIR_FRM 0x0028 /* Hardware frame processing set */
+#define IRIF_SIR_EOF 0x002A /* EOF value */
+#define IRIF_SIR_FLG 0x002C /* Flag clear */
+#define IRIF_UART_STS2 0x002E /* UART status 2 */
+#define IRIF_UART0 0x0030 /* UART control */
+#define IRIF_UART1 0x0032 /* UART status */
+#define IRIF_UART2 0x0034 /* UART mode */
+#define IRIF_UART3 0x0036 /* UART transmit data */
+#define IRIF_UART4 0x0038 /* UART receive data */
+#define IRIF_UART5 0x003A /* UART interrupt mask */
+#define IRIF_UART6 0x003C /* UART baud rate error correction */
+#define IRIF_UART7 0x003E /* UART baud rate count set */
+#define IRIF_CRC0 0x0040 /* CRC engine control */
+#define IRIF_CRC1 0x0042 /* CRC engine input data */
+#define IRIF_CRC2 0x0044 /* CRC engine calculation */
+#define IRIF_CRC3 0x0046 /* CRC engine output data 1 */
+#define IRIF_CRC4 0x0048 /* CRC engine output data 2 */
+
+/* IRIF_SIR0 */
+#define IRTPW (1 << 1) /* transmit pulse width select */
+#define IRERRC (1 << 0) /* Clear receive pulse width error */
+
+/* IRIF_SIR3 */
+#define IRERR (1 << 0) /* received pulse width Error */
+
+/* IRIF_SIR_FRM */
+#define EOFD (1 << 9) /* EOF detection flag */
+#define FRER (1 << 8) /* Frame Error bit */
+#define FRP (1 << 0) /* Frame processing set */
+
+/* IRIF_UART_STS2 */
+#define IRSME (1 << 6) /* Receive Sum Error flag */
+#define IROVE (1 << 5) /* Receive Overrun Error flag */
+#define IRFRE (1 << 4) /* Receive Framing Error flag */
+#define IRPRE (1 << 3) /* Receive Parity Error flag */
+
+/* IRIF_UART0_*/
+#define TBEC (1 << 2) /* Transmit Data Clear */
+#define RIE (1 << 1) /* Receive Enable */
+#define TIE (1 << 0) /* Transmit Enable */
+
+/* IRIF_UART1 */
+#define URSME (1 << 6) /* Receive Sum Error Flag */
+#define UROVE (1 << 5) /* Receive Overrun Error Flag */
+#define URFRE (1 << 4) /* Receive Framing Error Flag */
+#define URPRE (1 << 3) /* Receive Parity Error Flag */
+#define RBF (1 << 2) /* Receive Buffer Full Flag */
+#define TSBE (1 << 1) /* Transmit Shift Buffer Empty Flag */
+#define TBE (1 << 0) /* Transmit Buffer Empty flag */
+#define TBCOMP (TSBE | TBE)
+
+/* IRIF_UART5 */
+#define RSEIM (1 << 6) /* Receive Sum Error Flag IRQ Mask */
+#define RBFIM (1 << 2) /* Receive Buffer Full Flag IRQ Mask */
+#define TSBEIM (1 << 1) /* Transmit Shift Buffer Empty Flag IRQ Mask */
+#define TBEIM (1 << 0) /* Transmit Buffer Empty Flag IRQ Mask */
+#define RX_MASK (RSEIM | RBFIM)
+
+/* IRIF_CRC0 */
+#define CRC_RST (1 << 15) /* CRC Engine Reset */
+#define CRC_CT_MASK 0x0FFF
+
+/************************************************************************
+
+
+ structure
+
+
+************************************************************************/
+struct sh_sir_self {
+ void __iomem *membase;
+ unsigned int irq;
+ struct clk *clk;
+
+ struct net_device *ndev;
+
+ struct irlap_cb *irlap;
+ struct qos_info qos;
+
+ iobuff_t tx_buff;
+ iobuff_t rx_buff;
+};
+
+/************************************************************************
+
+
+ common function
+
+
+************************************************************************/
+static void sh_sir_write(struct sh_sir_self *self, u32 offset, u16 data)
+{
+ iowrite16(data, self->membase + offset);
+}
+
+static u16 sh_sir_read(struct sh_sir_self *self, u32 offset)
+{
+ return ioread16(self->membase + offset);
+}
+
+static void sh_sir_update_bits(struct sh_sir_self *self, u32 offset,
+ u16 mask, u16 data)
+{
+ u16 old, new;
+
+ old = sh_sir_read(self, offset);
+ new = (old & ~mask) | data;
+ if (old != new)
+ sh_sir_write(self, offset, new);
+}
+
+/************************************************************************
+
+
+ CRC function
+
+
+************************************************************************/
+static void sh_sir_crc_reset(struct sh_sir_self *self)
+{
+ sh_sir_write(self, IRIF_CRC0, CRC_RST);
+}
+
+static void sh_sir_crc_add(struct sh_sir_self *self, u8 data)
+{
+ sh_sir_write(self, IRIF_CRC1, (u16)data);
+}
+
+static u16 sh_sir_crc_cnt(struct sh_sir_self *self)
+{
+ return CRC_CT_MASK & sh_sir_read(self, IRIF_CRC0);
+}
+
+static u16 sh_sir_crc_out(struct sh_sir_self *self)
+{
+ return sh_sir_read(self, IRIF_CRC4);
+}
+
+static int sh_sir_crc_init(struct sh_sir_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ int ret = -EIO;
+ u16 val;
+
+ sh_sir_crc_reset(self);
+
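+ /*
+ * Self-test: feed a known four-byte pattern through the CRC engine and
+ * verify both the byte count and the expected result (0x51DF) before
+ * the engine is used on real frames.
+ */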
+ sh_sir_crc_add(self, 0xCC);
+ sh_sir_crc_add(self, 0xF5);
+ sh_sir_crc_add(self, 0xF1);
+ sh_sir_crc_add(self, 0xA7);
+
+ val = sh_sir_crc_cnt(self);
+ if (4 != val) {
+ dev_err(dev, "CRC count error %x\n", val);
+ goto crc_init_out;
+ }
+
+ val = sh_sir_crc_out(self);
+ if (0x51DF != val) {
+ dev_err(dev, "CRC result error%x\n", val);
+ goto crc_init_out;
+ }
+
+ ret = 0;
+
+crc_init_out:
+
+ sh_sir_crc_reset(self);
+ return ret;
+}
+
+/************************************************************************
+
+
+ baud rate functions
+
+
+************************************************************************/
+#define SCLK_BASE 1843200 /* 1.8432MHz */
+
+static u32 sh_sir_find_sclk(struct clk *irda_clk)
+{
+ struct cpufreq_frequency_table *freq_table = irda_clk->freq_table;
+ struct clk *pclk = clk_get(NULL, "peripheral_clk");
+ u32 limit, min = 0xffffffff, tmp;
+ int i, index = 0;
+
+ limit = clk_get_rate(pclk);
+ clk_put(pclk);
+
+ /* The IrDA clock cannot be set above peripheral_clk */
+ for (i = 0;
+ freq_table[i].frequency != CPUFREQ_TABLE_END;
+ i++) {
+ u32 freq = freq_table[i].frequency;
+
+ if (freq == CPUFREQ_ENTRY_INVALID)
+ continue;
+
+ /* skip rates above peripheral_clk */
+ if (freq > limit)
+ continue;
+
+ tmp = freq % SCLK_BASE;
+ if (tmp < min) {
+ min = tmp;
+ index = i;
+ }
+ }
+
+ return freq_table[index].frequency;
+}
+
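+/* Round a x10000-scaled error-correction value to the nearest integer */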
+#define ERR_ROUNDING(a) ((a + 5000) / 10000)
+static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate)
+{
+ struct clk *clk;
+ struct device *dev = &self->ndev->dev;
+ u32 rate;
+ u16 uabca, uabc;
+ u16 irbca, irbc;
+ u32 min, rerr, tmp;
+ int i;
+
+ /* Baud Rate Error Correction x 10000 */
+ u32 rate_err_array[] = {
+ 0, 625, 1250, 1875,
+ 2500, 3125, 3750, 4375,
+ 5000, 5625, 6250, 6875,
+ 7500, 8125, 8750, 9375,
+ };
+
+ /*
+ * FIXME
+ *
+ * only 9600 baud is supported for now
+ */
+ switch (baudrate) {
+ case 9600:
+ break;
+ default:
+ dev_err(dev, "un-supported baudrate %d\n", baudrate);
+ return -EIO;
+ }
+
+ clk = clk_get(NULL, "irda_clk");
+ if (!clk) {
+ dev_err(dev, "can not get irda_clk\n");
+ return -EIO;
+ }
+
+ clk_set_rate(clk, sh_sir_find_sclk(clk));
+ rate = clk_get_rate(clk);
+ clk_put(clk);
+
+ dev_dbg(dev, "selected sclk = %d\n", rate);
+
+ /*
+ * CALCULATION
+ *
+ * 1843200 = system rate / (irbca + (irbc + 1))
+ */
+
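+ /*
+ * Pick the integer divider (irbc) first, then choose the fractional
+ * correction (irbca) whose table entry is closest to the remaining
+ * error, both relative to the 1.8432 MHz IR base clock.
+ */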
+ irbc = rate / SCLK_BASE;
+
+ tmp = rate - (SCLK_BASE * irbc);
+ tmp *= 10000;
+
+ rerr = tmp / SCLK_BASE;
+
+ min = 0xffffffff;
+ irbca = 0;
+ for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
+ tmp = abs(rate_err_array[i] - rerr);
+ if (min > tmp) {
+ min = tmp;
+ irbca = i;
+ }
+ }
+
+ tmp = rate / (irbc + ERR_ROUNDING(rate_err_array[irbca]));
+ if ((SCLK_BASE / 100) < abs(tmp - SCLK_BASE))
+ dev_warn(dev, "IrDA freq error margin over %d\n", tmp);
+
+ dev_dbg(dev, "target = %d, result = %d, infrared = %d.%d\n",
+ SCLK_BASE, tmp, irbc, rate_err_array[irbca]);
+
+ irbca = (irbca & 0xF) << 4;
+ irbc = (irbc - 1) & 0xF;
+
+ if (!irbc) {
+ dev_err(dev, "sh_sir can not set 0 in IRIF_SIR2\n");
+ return -EIO;
+ }
+
+ sh_sir_write(self, IRIF_SIR0, IRTPW | IRERRC);
+ sh_sir_write(self, IRIF_SIR1, irbca);
+ sh_sir_write(self, IRIF_SIR2, irbc);
+
+ /*
+ * CALCULATION
+ *
+ * BaudRate[bps] = system rate / (uabca + (uabc + 1) x 16)
+ */
+
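+ /*
+ * Same approach for the UART divider: round it down to a multiple of
+ * 16, then pick the closest fractional correction from the table.
+ */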
+ uabc = rate / baudrate;
+ uabc = (uabc / 16) - 1;
+ uabc = (uabc + 1) * 16;
+
+ tmp = rate - (uabc * baudrate);
+ tmp *= 10000;
+
+ rerr = tmp / baudrate;
+
+ min = 0xffffffff;
+ uabca = 0;
+ for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
+ tmp = abs(rate_err_array[i] - rerr);
+ if (min > tmp) {
+ min = tmp;
+ uabca = i;
+ }
+ }
+
+ tmp = rate / (uabc + ERR_ROUNDING(rate_err_array[uabca]));
+ if ((baudrate / 100) < abs(tmp - baudrate))
+ dev_warn(dev, "UART freq error margin over %d\n", tmp);
+
+ dev_dbg(dev, "target = %d, result = %d, uart = %d.%d\n",
+ baudrate, tmp,
+ uabc, rate_err_array[uabca]);
+
+ uabca = (uabca & 0xF) << 4;
+ uabc = (uabc / 16) - 1;
+
+ sh_sir_write(self, IRIF_UART6, uabca);
+ sh_sir_write(self, IRIF_UART7, uabc);
+
+ return 0;
+}
+
+/************************************************************************
+
+
+ iobuf function
+
+
+************************************************************************/
+static int __sh_sir_init_iobuf(iobuff_t *io, int size)
+{
+ io->head = kmalloc(size, GFP_KERNEL);
+ if (!io->head)
+ return -ENOMEM;
+
+ io->truesize = size;
+ io->in_frame = FALSE;
+ io->state = OUTSIDE_FRAME;
+ io->data = io->head;
+
+ return 0;
+}
+
+static void sh_sir_remove_iobuf(struct sh_sir_self *self)
+{
+ kfree(self->rx_buff.head);
+ kfree(self->tx_buff.head);
+
+ self->rx_buff.head = NULL;
+ self->tx_buff.head = NULL;
+}
+
+static int sh_sir_init_iobuf(struct sh_sir_self *self, int rxsize, int txsize)
+{
+ int err = -ENOMEM;
+
+ if (self->rx_buff.head ||
+ self->tx_buff.head) {
+ dev_err(&self->ndev->dev, "iobuff has already existed.");
+ return err;
+ }
+
+ err = __sh_sir_init_iobuf(&self->rx_buff, rxsize);
+ if (err)
+ goto iobuf_err;
+
+ err = __sh_sir_init_iobuf(&self->tx_buff, txsize);
+
+iobuf_err:
+ if (err)
+ sh_sir_remove_iobuf(self);
+
+ return err;
+}
+
+/************************************************************************
+
+
+ status function
+
+
+************************************************************************/
+static void sh_sir_clear_all_err(struct sh_sir_self *self)
+{
+ /* Clear error flag for receive pulse width */
+ sh_sir_update_bits(self, IRIF_SIR0, IRERRC, IRERRC);
+
+ /* Clear frame / EOF error flag */
+ sh_sir_write(self, IRIF_SIR_FLG, 0xffff);
+
+ /* Clear all status error */
+ sh_sir_write(self, IRIF_UART_STS2, 0);
+}
+
+static void sh_sir_set_phase(struct sh_sir_self *self, int phase)
+{
+ u16 uart5 = 0;
+ u16 uart0 = 0;
+
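+ /*
+ * Each phase unmasks only the interrupts it needs: transmit-buffer
+ * empty while sending, shift-buffer empty while draining, and the
+ * receive flags while receiving.
+ */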
+ switch (phase) {
+ case TX_PHASE:
+ uart5 = TBEIM;
+ uart0 = TBEC | TIE;
+ break;
+ case TX_COMP_PHASE:
+ uart5 = TSBEIM;
+ uart0 = TIE;
+ break;
+ case RX_PHASE:
+ uart5 = RX_MASK;
+ uart0 = RIE;
+ break;
+ default:
+ break;
+ }
+
+ sh_sir_write(self, IRIF_UART5, uart5);
+ sh_sir_write(self, IRIF_UART0, uart0);
+}
+
+static int sh_sir_is_which_phase(struct sh_sir_self *self)
+{
+ u16 val = sh_sir_read(self, IRIF_UART5);
+
+ if (val & TBEIM)
+ return TX_PHASE;
+
+ if (val & TSBEIM)
+ return TX_COMP_PHASE;
+
+ if (val & RX_MASK)
+ return RX_PHASE;
+
+ return NONE_PHASE;
+}
+
+static void sh_sir_tx(struct sh_sir_self *self, int phase)
+{
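+ /*
+ * TX_PHASE pushes one byte per interrupt until the wrapped frame is
+ * drained, then TX_COMP_PHASE waits for the shift buffer to empty
+ * before re-enabling receive and waking the queue.
+ */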
+ switch (phase) {
+ case TX_PHASE:
+ if (0 >= self->tx_buff.len) {
+ sh_sir_set_phase(self, TX_COMP_PHASE);
+ } else {
+ sh_sir_write(self, IRIF_UART3, self->tx_buff.data[0]);
+ self->tx_buff.len--;
+ self->tx_buff.data++;
+ }
+ break;
+ case TX_COMP_PHASE:
+ sh_sir_set_phase(self, RX_PHASE);
+ netif_wake_queue(self->ndev);
+ break;
+ default:
+ dev_err(&self->ndev->dev, "should not happen\n");
+ break;
+ }
+}
+
+static int sh_sir_read_data(struct sh_sir_self *self)
+{
+ u16 val;
+ int timeout = 1024;
+
+ while (timeout--) {
+ val = sh_sir_read(self, IRIF_UART1);
+
+ /* data received */
+ if (val & RBF) {
+ if (val & (URSME | UROVE | URFRE | URPRE))
+ break;
+
+ return (int)sh_sir_read(self, IRIF_UART4);
+ }
+
+ udelay(1);
+ }
+
+ dev_err(&self->ndev->dev, "UART1 %04x : STATUS %04x\n",
+ val, sh_sir_read(self, IRIF_UART_STS2));
+
+ /* read data register for clear error */
+ sh_sir_read(self, IRIF_UART4);
+
+ return -1;
+}
+
+static void sh_sir_rx(struct sh_sir_self *self)
+{
+ int timeout = 1024;
+ int data;
+
+ while (timeout--) {
+ data = sh_sir_read_data(self);
+ if (data < 0)
+ break;
+
+ async_unwrap_char(self->ndev, &self->ndev->stats,
+ &self->rx_buff, (u8)data);
+ self->ndev->last_rx = jiffies;
+
+ if (EOFD & sh_sir_read(self, IRIF_SIR_FRM))
+ continue;
+
+ break;
+ }
+}
+
+static irqreturn_t sh_sir_irq(int irq, void *dev_id)
+{
+ struct sh_sir_self *self = dev_id;
+ struct device *dev = &self->ndev->dev;
+ int phase = sh_sir_is_which_phase(self);
+
+ switch (phase) {
+ case TX_COMP_PHASE:
+ case TX_PHASE:
+ sh_sir_tx(self, phase);
+ break;
+ case RX_PHASE:
+ if (sh_sir_read(self, IRIF_SIR3))
+ dev_err(dev, "rcv pulse width error occurred\n");
+
+ sh_sir_rx(self);
+ sh_sir_clear_all_err(self);
+ break;
+ default:
+ dev_err(dev, "unknown interrupt\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+/************************************************************************
+
+
+ net_device_ops function
+
+
+************************************************************************/
+static int sh_sir_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+ int speed = irda_get_next_speed(skb);
+
+ if ((0 < speed) &&
+ (9600 != speed)) {
+ dev_err(&ndev->dev, "only 9600 baud is supported (%d)\n", speed);
+ return -EIO;
+ }
+
+ netif_stop_queue(ndev);
+
+ self->tx_buff.data = self->tx_buff.head;
+ self->tx_buff.len = 0;
+ if (skb->len)
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ sh_sir_set_phase(self, TX_PHASE);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int sh_sir_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
+{
+ /*
+ * FIXME
+ *
+ * This function is required by the IrDA framework,
+ * but there is nothing to do here yet.
+ */
+ return 0;
+}
+
+static struct net_device_stats *sh_sir_stats(struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+
+ return &self->ndev->stats;
+}
+
+static int sh_sir_open(struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+ int err;
+
+ clk_enable(self->clk);
+ err = sh_sir_crc_init(self);
+ if (err)
+ goto open_err;
+
+ sh_sir_set_baudrate(self, 9600);
+
+ self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
+ if (!self->irlap)
+ goto open_err;
+
+ /*
+ * Now enable the interrupt then start the queue
+ */
+ sh_sir_update_bits(self, IRIF_SIR_FRM, FRP, FRP);
+ sh_sir_read(self, IRIF_UART1); /* flag clear */
+ sh_sir_read(self, IRIF_UART4); /* flag clear */
+ sh_sir_set_phase(self, RX_PHASE);
+
+ netif_start_queue(ndev);
+
+ dev_info(&self->ndev->dev, "opened\n");
+
+ return 0;
+
+open_err:
+ clk_disable(self->clk);
+
+ return err;
+}
+
+static int sh_sir_stop(struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+
+ /* Stop IrLAP */
+ if (self->irlap) {
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+ }
+
+ netif_stop_queue(ndev);
+
+ dev_info(&ndev->dev, "stopped\n");
+
+ return 0;
+}
+
+static const struct net_device_ops sh_sir_ndo = {
+ .ndo_open = sh_sir_open,
+ .ndo_stop = sh_sir_stop,
+ .ndo_start_xmit = sh_sir_hard_xmit,
+ .ndo_do_ioctl = sh_sir_ioctl,
+ .ndo_get_stats = sh_sir_stats,
+};
+
+/************************************************************************
+
+
+ platform_driver function
+
+
+************************************************************************/
+static int __devinit sh_sir_probe(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+ struct sh_sir_self *self;
+ struct resource *res;
+ char clk_name[8];
+ void __iomem *base;
+ int irq;
+ int err = -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || irq < 0) {
+ dev_err(&pdev->dev, "Not enough platform resources.\n");
+ goto exit;
+ }
+
+ ndev = alloc_irdadev(sizeof(*self));
+ if (!ndev)
+ goto exit;
+
+ base = ioremap_nocache(res->start, resource_size(res));
+ if (!base) {
+ err = -ENXIO;
+ dev_err(&pdev->dev, "Unable to ioremap.\n");
+ goto err_mem_1;
+ }
+
+ self = netdev_priv(ndev);
+ err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
+ if (err)
+ goto err_mem_2;
+
+ snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id);
+ self->clk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(self->clk)) {
+ dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ goto err_mem_3;
+ }
+
+ irda_init_max_qos_capabilies(&self->qos);
+
+ ndev->netdev_ops = &sh_sir_ndo;
+ ndev->irq = irq;
+
+ self->membase = base;
+ self->ndev = ndev;
+ self->qos.baud_rate.bits &= IR_9600; /* FIXME */
+ self->qos.min_turn_time.bits = 1; /* 10 ms or more */
+
+ irda_qos_bits_to_value(&self->qos);
+
+ err = register_netdev(ndev);
+ if (err)
+ goto err_mem_4;
+
+ platform_set_drvdata(pdev, ndev);
+
+ if (request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self)) {
+ dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
+ unregister_netdev(ndev);
+ goto err_mem_4;
+ }
+
+ dev_info(&pdev->dev, "SuperH IrDA probed\n");
+
+ goto exit;
+
+err_mem_4:
+ clk_put(self->clk);
+err_mem_3:
+ sh_sir_remove_iobuf(self);
+err_mem_2:
+ iounmap(self->membase);
+err_mem_1:
+ free_netdev(ndev);
+exit:
+ return err;
+}
+
+static int __devexit sh_sir_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct sh_sir_self *self = netdev_priv(ndev);
+
+ if (!self)
+ return 0;
+
+ unregister_netdev(ndev);
+ clk_put(self->clk);
+ sh_sir_remove_iobuf(self);
+ iounmap(self->membase);
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sh_sir_driver = {
+ .probe = sh_sir_probe,
+ .remove = __devexit_p(sh_sir_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init sh_sir_init(void)
+{
+ return platform_driver_register(&sh_sir_driver);
+}
+
+static void __exit sh_sir_exit(void)
+{
+ platform_driver_unregister(&sh_sir_driver);
+}
+
+module_init(sh_sir_init);
+module_exit(sh_sir_exit);
+
+MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
+MODULE_DESCRIPTION("SuperH IrDA driver");
+MODULE_LICENSE("GPL");
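Editorial note, not part of the patch: sh_sir is a platform driver, so it only binds when board/SoC setup code registers a matching platform device. A minimal sketch of such a registration follows; the register base, window size and IRQ number are invented for illustration, and it assumes DRIVER_NAME is "sh_sir" and that the clock framework exposes a clock named "irda0" for id 0 (probe builds the clock name with "irda%d").

#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource sh_sir_resources[] = {
	[0] = {
		.start	= 0xe6c00000,		/* assumed register base */
		.end	= 0xe6c00000 + 0x100 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 20,			/* assumed IRQ number */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device sh_sir_device = {
	.name		= "sh_sir",		/* must match DRIVER_NAME */
	.id		= 0,			/* probe() derives clock name "irda0" */
	.resource	= sh_sir_resources,
	.num_resources	= ARRAY_SIZE(sh_sir_resources),
};

/* board setup code would then call platform_device_register(&sh_sir_device); */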
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index fddb4efd5453..6533c010cf5c 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -121,7 +121,7 @@ static void iodelay(int udelay)
}
}
-static struct pci_device_id via_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(via_pci_tbl) = {
{ PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
{ PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
{ PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
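Editorial note, not part of the patch: the DEFINE_PCI_DEVICE_TABLE conversions in this merge are purely an annotation change. At the time of this merge the macro in <linux/pci.h> expands to approximately the following, so the table becomes a const, __devinitconst-marked array without its contents changing:

#define DEFINE_PCI_DEVICE_TABLE(_table) \
	const struct pci_device_id _table[] __devinitconst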
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index bd3c6b5ee76a..209d4bcfaced 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -59,7 +59,7 @@ MODULE_LICENSE("GPL");
static /* const */ char drivername[] = DRIVER_NAME;
-static struct pci_device_id vlsi_irda_table [] = {
+static DEFINE_PCI_DEVICE_TABLE(vlsi_irda_table) = {
{
.class = PCI_CLASS_WIRELESS_IRDA << 8,
.class_mask = PCI_CLASS_SUBCLASS_MASK << 8,
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index bcd0f01d5feb..c56ea69762cd 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -50,7 +50,7 @@ MODULE_PARM_DESC(copybreak,
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id ixgb_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = {
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
@@ -1363,13 +1363,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
dma_error:
dev_err(&pdev->dev, "TX DMA map failed\n");
buffer_info->dma = 0;
- count--;
-
- while (count >= 0) {
+ if (count)
count--;
- i--;
- if (i < 0)
+
+ while (count--) {
+ if (i == 0)
i += tx_ring->count;
+ i--;
buffer_info = &tx_ring->buffer_info[i];
ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
}
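Editorial note, not part of the patch: the reworked dma_error path above walks backwards over only the descriptors that were actually mapped, wrapping around the ring, and skips the entry that just failed (its dma handle was already zeroed). A standalone sketch of the same index arithmetic, using hypothetical names (ring_size, unmap_one), is:

/* Hypothetical stand-in for the driver's per-entry cleanup helper. */
static void unmap_one(unsigned int idx)
{
	/* unmap and free tx_ring->buffer_info[idx] */
}

/* Unwind after a mapping failure: 'i' is the entry that failed,
 * 'count' the number of ring entries touched so far. */
static void unwind_tx_map(unsigned int i, unsigned int count,
			  unsigned int ring_size)
{
	if (count)
		count--;		/* the failing entry holds no mapping */

	while (count--) {
		if (i == 0)
			i += ring_size;	/* wrap backwards around the ring */
		i--;
		unmap_one(i);
	}
}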
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 21b41f42b61c..8f81efb49169 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 10 Gigabit PCI Express Linux driver
-# Copyright(c) 1999 - 2009 Intel Corporation.
+# Copyright(c) 1999 - 2010 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -33,7 +33,8 @@
obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
- ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o
+ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
+ ixgbe_mbx.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 8da8eb535084..bffbe0d52d33 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -98,6 +98,22 @@
#define IXGBE_MAX_RSC_INT_RATE 162760
+#define IXGBE_MAX_VF_MC_ENTRIES 30
+#define IXGBE_MAX_VF_FUNCTIONS 64
+#define IXGBE_MAX_VFTA_ENTRIES 128
+#define MAX_EMULATION_MAC_ADDRS 16
+#define VMDQ_P(p) ((p) + adapter->num_vfs)
+
+struct vf_data_storage {
+ unsigned char vf_mac_addresses[ETH_ALEN];
+ u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
+ u16 num_vf_mc_hashes;
+ u16 default_vf_vlan_id;
+ u16 vlans_enabled;
+ bool clear_to_send;
+ int rar;
+};
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
@@ -159,6 +175,7 @@ struct ixgbe_ring {
struct ixgbe_queue_stats stats;
unsigned long reinit_state;
+ int numa_node;
u64 rsc_count; /* stat for coalesced packets */
u64 rsc_flush; /* stats for flushed packets */
u32 restart_queue; /* track tx queue restarts */
@@ -171,7 +188,7 @@ struct ixgbe_ring {
enum ixgbe_ring_f_enum {
RING_F_NONE = 0,
RING_F_DCB,
- RING_F_VMDQ,
+ RING_F_VMDQ, /* SR-IOV uses the same ring feature */
RING_F_RSS,
RING_F_FDIR,
#ifdef IXGBE_FCOE
@@ -183,7 +200,7 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_DCB_INDICES 8
#define IXGBE_MAX_RSS_INDICES 16
-#define IXGBE_MAX_VMDQ_INDICES 16
+#define IXGBE_MAX_VMDQ_INDICES 64
#define IXGBE_MAX_FDIR_INDICES 64
#ifdef IXGBE_FCOE
#define IXGBE_MAX_FCOE_INDICES 8
@@ -277,7 +294,7 @@ struct ixgbe_adapter {
u16 eitr_high;
/* TX */
- struct ixgbe_ring *tx_ring ____cacheline_aligned_in_smp; /* One per active queue */
+ struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
int num_tx_queues;
u32 tx_timeout_count;
bool detect_tx_hung;
@@ -286,8 +303,10 @@ struct ixgbe_adapter {
u64 lsc_int;
/* RX */
- struct ixgbe_ring *rx_ring ____cacheline_aligned_in_smp; /* One per active queue */
+ struct ixgbe_ring *rx_ring[MAX_RX_QUEUES] ____cacheline_aligned_in_smp;
int num_rx_queues;
+ int num_rx_pools; /* == num_rx_queues in 82598 */
+ int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
u64 non_eop_descs;
@@ -323,13 +342,14 @@ struct ixgbe_adapter {
#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20)
#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
-#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23)
-#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 24)
-#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 26)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27)
-#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 28)
-#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29)
+#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 23)
+#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 24)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 25)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 26)
+#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 27)
+#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 28)
+#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 29)
+#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 30)
u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
@@ -379,6 +399,13 @@ struct ixgbe_adapter {
u64 rsc_total_flush;
u32 wol;
u16 eeprom_version;
+
+ int node;
+
+ /* SR-IOV */
+ DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
+ unsigned int num_vfs;
+ struct vf_data_storage *vfinfo;
};
enum ixbge_state_t {
@@ -440,6 +467,7 @@ extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
u16 flex_byte);
extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
u8 l4type);
+extern void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fso(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 204177d78cec..3103f4165311 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 538340527aa6..d4ed6adb7975 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -31,6 +31,7 @@
#include "ixgbe.h"
#include "ixgbe_phy.h"
+#include "ixgbe_mbx.h"
#define IXGBE_82599_MAX_TX_QUEUES 128
#define IXGBE_82599_MAX_RX_QUEUES 128
@@ -889,7 +890,7 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
s32 status = 0;
- u32 ctrl, ctrl_ext;
+ u32 ctrl;
u32 i;
u32 autoc;
u32 autoc2;
@@ -944,15 +945,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
status = IXGBE_ERR_RESET_FAILED;
hw_dbg(hw, "Reset polling failed to complete.\n");
}
- /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
- ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
- ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
- IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
msleep(50);
-
-
/*
* Store the original AUTOC/AUTOC2 values if they have not been
* stored off yet. Otherwise restore the stored original
@@ -1095,9 +1090,11 @@ static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on)
{
u32 regindex;
+ u32 vlvf_index;
u32 bitindex;
u32 bits;
u32 first_empty_slot;
+ u32 vt_ctl;
if (vlan > 4095)
return IXGBE_ERR_PARAM;
@@ -1124,76 +1121,84 @@ static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
/* Part 2
- * If the vind is set
+ * If VT mode is set
* Either vlan_on
* make sure the vlan is in VLVF
* set the vind bit in the matching VLVFB
* Or !vlan_on
* clear the pool bit and possibly the vind
*/
- if (vind) {
- /* find the vlanid or the first empty slot */
- first_empty_slot = 0;
-
- for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
- bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
- if (!bits && !first_empty_slot)
- first_empty_slot = regindex;
- else if ((bits & 0x0FFF) == vlan)
- break;
- }
+ vt_ctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ if (!(vt_ctl & IXGBE_VT_CTL_VT_ENABLE))
+ goto out;
- if (regindex >= IXGBE_VLVF_ENTRIES) {
- if (first_empty_slot)
- regindex = first_empty_slot;
- else {
- hw_dbg(hw, "No space in VLVF.\n");
- goto out;
- }
+ /* find the vlanid or the first empty slot */
+ first_empty_slot = 0;
+
+ for (vlvf_index = 1; vlvf_index < IXGBE_VLVF_ENTRIES; vlvf_index++) {
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVF(vlvf_index));
+ if (!bits && !first_empty_slot)
+ first_empty_slot = vlvf_index;
+ else if ((bits & 0x0FFF) == vlan)
+ break;
+ }
+
+ if (vlvf_index >= IXGBE_VLVF_ENTRIES) {
+ if (first_empty_slot)
+ vlvf_index = first_empty_slot;
+ else {
+ hw_dbg(hw, "No space in VLVF.\n");
+ goto out;
}
+ }
- if (vlan_on) {
- /* set the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(regindex * 2));
- bits |= (1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(regindex * 2), bits);
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1));
- bits |= (1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1), bits);
- }
+ if (vlan_on) {
+ /* set the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
+ bits |= (1 << vind);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2), bits);
} else {
- /* clear the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(regindex * 2));
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ bits |= (1 << (vind - 32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
+ }
+ } else {
+ /* clear the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
bits &= ~(1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(regindex * 2), bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1));
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1));
- bits &= ~(1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1), bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB(regindex * 2));
- }
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2), bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ } else {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ bits &= ~(1 << (vind - 32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
}
+ }
- if (bits)
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex),
- (IXGBE_VLVF_VIEN | vlan));
- else
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 0);
+ if (bits) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
+ (IXGBE_VLVF_VIEN | vlan));
+ /* if bits is non-zero then some pools/VFs are still
+ * using this VLAN ID. Force the VFTA entry to on */
+ bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+ bits |= (1 << bitindex);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
}
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
out:
return 0;
@@ -2655,4 +2660,5 @@ struct ixgbe_info ixgbe_82599_info = {
.mac_ops = &mac_ops_82599,
.eeprom_ops = &eeprom_ops_82599,
.phy_ops = &phy_ops_82599,
+ .mbx_ops = &mbx_ops_82599,
};
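Editorial note, not part of the patch: besides keying on VT mode instead of a non-zero vind, the set_vfta rework above fixes the pool-bit placement for pools 32-63 -- the bit written into the high VLVFB dword is now (vind - 32) rather than vind. The addressing can be summarized in a small sketch with hypothetical names:

/* Each VLVF entry owns two 32-bit VLVFB dwords: pools 0-31 live in the
 * low dword, pools 32-63 in the high one. */
static void vlvfb_slot(unsigned int vlvf_index, unsigned int vind,
		       unsigned int *vlvfb_index, unsigned int *bit)
{
	*vlvfb_index = (vlvf_index * 2) + (vind / 32);
	*bit = vind % 32;	/* e.g. pool 40 -> high dword, bit 8 */
}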
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 688b8ca5da32..eb49020903c1 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -28,7 +28,6 @@
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>
-#include <linux/list.h>
#include <linux/netdevice.h>
#include "ixgbe.h"
@@ -1278,19 +1277,11 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
/* Get the MAC address from the RAR0 for later reference */
hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
- hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
- hw->mac.addr[0], hw->mac.addr[1],
- hw->mac.addr[2]);
- hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
- hw->mac.addr[4], hw->mac.addr[5]);
+ hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
} else {
/* Setup the receive address. */
hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
- hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ",
- hw->mac.addr[0], hw->mac.addr[1],
- hw->mac.addr[2]);
- hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
- hw->mac.addr[4], hw->mac.addr[5]);
+ hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}
@@ -1355,7 +1346,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
/**
* ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
* @hw: pointer to hardware structure
- * @uc_list: the list of new addresses
+ * @netdev: pointer to net device structure
*
* The given list replaces any existing list. Clears the secondary addrs from
* receive address registers. Uses unused receive address registers for the
@@ -1365,7 +1356,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
* manually putting the device into promiscuous mode.
**/
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
- struct list_head *uc_list)
+ struct net_device *netdev)
{
u32 i;
u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
@@ -1389,7 +1380,7 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
}
/* Add the new addresses */
- list_for_each_entry(ha, uc_list, list) {
+ netdev_for_each_uc_addr(ha, netdev) {
hw_dbg(hw, " Adding the secondary addresses:\n");
ixgbe_add_uc_addr(hw, ha->addr, 0);
}
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 27f3214bed2e..13606d4809c9 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -60,7 +60,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count,
ixgbe_mc_addr_itr func);
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
- struct list_head *uc_list);
+ struct net_device *netdev);
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index a1562287342f..9aea4f04bbd2 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 64a9fa15c059..5caafd4afbc3 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index f30263898ebc..f0e9279d4669 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
index ebbe53c352a7..cc728fa092e2 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index ec8a252636d3..4f7a26ab411e 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 9e5e2827e4af..0f3f791e1e1d 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index 3c7a79a7d7c6..dd4883f642be 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -223,7 +223,7 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
adapter->dcb_cfg.bw_percentage[0][bwg_id]) {
- adapter->dcb_set_bitmap |= BIT_PG_RX;
+ adapter->dcb_set_bitmap |= BIT_PG_TX;
adapter->dcb_set_bitmap |= BIT_RESETLINK;
}
}
@@ -341,6 +341,12 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
if (!adapter->dcb_set_bitmap)
return DCB_NO_HW_CHG;
+ ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
+ adapter->ring_feature[RING_F_DCB].indices);
+
+ if (ret)
+ return DCB_NO_HW_CHG;
+
/*
* Only take down the adapter if the configuration change
* requires a reset.
@@ -359,14 +365,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
}
}
- ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
- adapter->ring_feature[RING_F_DCB].indices);
- if (ret) {
- if (adapter->dcb_set_bitmap & BIT_RESETLINK)
- clear_bit(__IXGBE_RESETTING, &adapter->state);
- return DCB_NO_HW_CHG;
- }
-
if (adapter->dcb_cfg.pfc_mode_enable) {
if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
(adapter->hw.fc.current_mode != ixgbe_fc_pfc))
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 0bd49d3b9f65..07a9410c08d4 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -834,8 +834,8 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_ring *tx_ring = adapter->tx_ring;
- struct ixgbe_ring *rx_ring = adapter->rx_ring;
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
ring->rx_max_pending = IXGBE_MAX_RXD;
ring->tx_max_pending = IXGBE_MAX_TXD;
@@ -867,8 +867,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
- if ((new_tx_count == adapter->tx_ring->count) &&
- (new_rx_count == adapter->rx_ring->count)) {
+ if ((new_tx_count == adapter->tx_ring[0]->count) &&
+ (new_rx_count == adapter->rx_ring[0]->count)) {
/* nothing to do */
return 0;
}
@@ -878,25 +878,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].count = new_tx_count;
+ adapter->tx_ring[i]->count = new_tx_count;
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].count = new_rx_count;
+ adapter->rx_ring[i]->count = new_rx_count;
adapter->tx_ring_count = new_tx_count;
adapter->rx_ring_count = new_rx_count;
- goto err_setup;
+ goto clear_reset;
}
- temp_tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
+ temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
if (!temp_tx_ring) {
err = -ENOMEM;
- goto err_setup;
+ goto clear_reset;
}
if (new_tx_count != adapter->tx_ring_count) {
- memcpy(temp_tx_ring, adapter->tx_ring,
- adapter->num_tx_queues * sizeof(struct ixgbe_ring));
for (i = 0; i < adapter->num_tx_queues; i++) {
+ memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
+ sizeof(struct ixgbe_ring));
temp_tx_ring[i].count = new_tx_count;
err = ixgbe_setup_tx_resources(adapter,
&temp_tx_ring[i]);
@@ -904,28 +903,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
while (i) {
i--;
ixgbe_free_tx_resources(adapter,
- &temp_tx_ring[i]);
+ &temp_tx_ring[i]);
}
- goto err_setup;
+ goto clear_reset;
}
}
need_update = true;
}
- temp_rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
- if ((!temp_rx_ring) && (need_update)) {
- for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]);
- kfree(temp_tx_ring);
+ temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
+ if (!temp_rx_ring) {
err = -ENOMEM;
goto err_setup;
}
if (new_rx_count != adapter->rx_ring_count) {
- memcpy(temp_rx_ring, adapter->rx_ring,
- adapter->num_rx_queues * sizeof(struct ixgbe_ring));
for (i = 0; i < adapter->num_rx_queues; i++) {
+ memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
+ sizeof(struct ixgbe_ring));
temp_rx_ring[i].count = new_rx_count;
err = ixgbe_setup_rx_resources(adapter,
&temp_rx_ring[i]);
@@ -947,22 +942,32 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
/* tx */
if (new_tx_count != adapter->tx_ring_count) {
- kfree(adapter->tx_ring);
- adapter->tx_ring = temp_tx_ring;
- temp_tx_ring = NULL;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ixgbe_free_tx_resources(adapter,
+ adapter->tx_ring[i]);
+ memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
+ sizeof(struct ixgbe_ring));
+ }
adapter->tx_ring_count = new_tx_count;
}
/* rx */
if (new_rx_count != adapter->rx_ring_count) {
- kfree(adapter->rx_ring);
- adapter->rx_ring = temp_rx_ring;
- temp_rx_ring = NULL;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ ixgbe_free_rx_resources(adapter,
+ adapter->rx_ring[i]);
+ memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
+ sizeof(struct ixgbe_ring));
+ }
adapter->rx_ring_count = new_rx_count;
}
ixgbe_up(adapter);
}
+
+ vfree(temp_rx_ring);
err_setup:
+ vfree(temp_tx_ring);
+clear_reset:
clear_bit(__IXGBE_RESETTING, &adapter->state);
return err;
}
@@ -1007,13 +1012,13 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
for (j = 0; j < adapter->num_tx_queues; j++) {
- queue_stat = (u64 *)&adapter->tx_ring[j].stats;
+ queue_stat = (u64 *)&adapter->tx_ring[j]->stats;
for (k = 0; k < stat_count; k++)
data[i + k] = queue_stat[k];
i += k;
}
for (j = 0; j < adapter->num_rx_queues; j++) {
- queue_stat = (u64 *)&adapter->rx_ring[j].stats;
+ queue_stat = (u64 *)&adapter->rx_ring[j]->stats;
for (k = 0; k < stat_count; k++)
data[i + k] = queue_stat[k];
i += k;
@@ -1627,7 +1632,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
reg_data |= IXGBE_RXDCTL_ENABLE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- int j = adapter->rx_ring[0].reg_idx;
+ int j = adapter->rx_ring[0]->reg_idx;
u32 k;
for (k = 0; k < 10; k++) {
if (IXGBE_READ_REG(&adapter->hw,
@@ -1867,11 +1872,22 @@ static void ixgbe_diag_test(struct net_device *netdev,
if (ixgbe_intr_test(adapter, &data[2]))
eth_test->flags |= ETH_TEST_FL_FAILED;
+ /* If SRIOV or VMDq is enabled then skip MAC
+ * loopback diagnostic. */
+ if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
+ IXGBE_FLAG_VMDQ_ENABLED)) {
+ DPRINTK(HW, INFO, "Skip MAC loopback diagnostic in VT "
+ "mode\n");
+ data[3] = 0;
+ goto skip_loopback;
+ }
+
ixgbe_reset(adapter);
DPRINTK(HW, INFO, "loopback testing starting\n");
if (ixgbe_loopback_test(adapter, &data[3]))
eth_test->flags |= ETH_TEST_FL_FAILED;
+skip_loopback:
ixgbe_reset(adapter);
clear_bit(__IXGBE_TESTING, &adapter->state);
@@ -2000,7 +2016,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
+ ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit;
/* only valid if in constant ITR mode */
switch (adapter->rx_itr_setting) {
@@ -2053,7 +2069,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
return -EINVAL;
if (ec->tx_max_coalesced_frames_irq)
- adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
+ adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
if (ec->rx_coalesce_usecs > 1) {
/* check the limits */
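Editorial note, not part of the patch: the relabelled exit path in ixgbe_set_ringparam() above follows the usual stacked-cleanup-label pattern, where each label undoes exactly what was set up before the corresponding failure point (clear_reset only clears the RESETTING bit, err_setup additionally frees temp_tx_ring, and the success path frees temp_rx_ring first). A generic sketch of that pattern, with hypothetical names and sizes, is:

#include <linux/errno.h>
#include <linux/vmalloc.h>

#define A_SIZE 4096	/* arbitrary sizes, for illustration only */
#define B_SIZE 4096

static int setup_two_buffers(void)
{
	void *a, *b;
	int err = 0;

	a = vmalloc(A_SIZE);
	if (!a) {
		err = -ENOMEM;
		goto out;	/* nothing allocated yet */
	}

	b = vmalloc(B_SIZE);
	if (!b) {
		err = -ENOMEM;
		goto free_a;	/* only 'a' needs to be undone */
	}

	/* ... use a and b ... */

	vfree(b);
free_a:
	vfree(a);
out:
	return err;
}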
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index da32a108a7b4..4123dec0dfb7 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -525,7 +525,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
fcoe_i = f->mask + i % f->indices;
fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
- fcoe_q = adapter->rx_ring[fcoe_i].reg_idx;
+ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
}
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
@@ -533,7 +533,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
} else {
/* Use single rx queue for FCoE */
fcoe_i = f->mask;
- fcoe_q = adapter->rx_ring[fcoe_i].reg_idx;
+ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
IXGBE_ETQS_QUEUE_EN |
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index de8ff53187da..abf4b2b3f252 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index bd64387563f0..2a3c8315e357 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -45,6 +45,7 @@
#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
+#include "ixgbe_sriov.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
@@ -52,7 +53,7 @@ static const char ixgbe_driver_string[] =
#define DRV_VERSION "2.0.44-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
-static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
+static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
@@ -67,7 +68,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id ixgbe_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
@@ -124,6 +125,13 @@ static struct notifier_block dca_notifier = {
};
#endif
+#ifdef CONFIG_PCI_IOV
+static unsigned int max_vfs;
+module_param(max_vfs, uint, 0);
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
+ "per physical function");
+#endif /* CONFIG_PCI_IOV */
+
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
@@ -131,6 +139,41 @@ MODULE_VERSION(DRV_VERSION);
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gcr;
+ u32 gpie;
+ u32 vmdctl;
+
+#ifdef CONFIG_PCI_IOV
+ /* disable iov and allow time for transactions to clear */
+ pci_disable_sriov(adapter->pdev);
+#endif
+
+ /* turn off device IOV mode */
+ gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ gcr &= ~(IXGBE_GCR_EXT_SRIOV);
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+ /* set default pool back to 0 */
+ vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+
+ /* take a breather then clean up driver data */
+ msleep(100);
+ if (adapter->vfinfo)
+ kfree(adapter->vfinfo);
+ adapter->vfinfo = NULL;
+
+ adapter->num_vfs = 0;
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+}
+
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
u32 ctrl_ext;
@@ -262,10 +305,12 @@ static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
int reg_idx = tx_ring->reg_idx;
int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
tc = reg_idx >> 2;
txoff = IXGBE_TFCS_TXOFF0;
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ break;
+ case ixgbe_mac_82599EB:
tc = 0;
txoff = IXGBE_TFCS_TXOFF;
if (dcb_i == 8) {
@@ -284,6 +329,9 @@ static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
tc += (reg_idx - 96) >> 4;
}
}
+ break;
+ default:
+ tc = 0;
}
txoff <<= tc;
}
@@ -446,7 +494,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
{
u32 rxctrl;
int cpu = get_cpu();
- int q = rx_ring - adapter->rx_ring;
+ int q = rx_ring->reg_idx;
if (rx_ring->cpu != cpu) {
rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
@@ -474,7 +522,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
{
u32 txctrl;
int cpu = get_cpu();
- int q = tx_ring - adapter->tx_ring;
+ int q = tx_ring->reg_idx;
struct ixgbe_hw *hw = &adapter->hw;
if (tx_ring->cpu != cpu) {
@@ -508,12 +556,12 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i].cpu = -1;
- ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
+ adapter->tx_ring[i]->cpu = -1;
+ ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i].cpu = -1;
- ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
+ adapter->rx_ring[i]->cpu = -1;
+ ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
}
}
@@ -984,7 +1032,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- j = adapter->rx_ring[r_idx].reg_idx;
+ j = adapter->rx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 0, j, v_idx);
r_idx = find_next_bit(q_vector->rxr_idx,
adapter->num_rx_queues,
@@ -994,7 +1042,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- j = adapter->tx_ring[r_idx].reg_idx;
+ j = adapter->tx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 1, j, v_idx);
r_idx = find_next_bit(q_vector->txr_idx,
adapter->num_tx_queues,
@@ -1020,7 +1068,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
/* set up to autoclear timer, and the vectors */
mask = IXGBE_EIMS_ENABLE_MASK;
- mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
+ if (adapter->num_vfs)
+ mask &= ~(IXGBE_EIMS_OTHER |
+ IXGBE_EIMS_MAILBOX |
+ IXGBE_EIMS_LSC);
+ else
+ mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
@@ -1129,7 +1182,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring = adapter->tx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
q_vector->tx_itr,
tx_ring->total_packets,
@@ -1144,7 +1197,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring = adapter->rx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
q_vector->rx_itr,
rx_ring->total_packets,
@@ -1249,6 +1302,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
if (eicr & IXGBE_EICR_LSC)
ixgbe_check_lsc(adapter);
+ if (eicr & IXGBE_EICR_MAILBOX)
+ ixgbe_msg_task(adapter);
+
if (hw->mac.type == ixgbe_mac_82598EB)
ixgbe_check_fan_failure(adapter, eicr);
@@ -1263,7 +1319,7 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
netif_tx_stop_all_queues(netdev);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *tx_ring =
- &adapter->tx_ring[i];
+ adapter->tx_ring[i];
if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
&tx_ring->reinit_state))
schedule_work(&adapter->fdir_reinit_task);
@@ -1322,7 +1378,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring = adapter->tx_ring[r_idx];
tx_ring->total_bytes = 0;
tx_ring->total_packets = 0;
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
@@ -1350,7 +1406,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring = adapter->rx_ring[r_idx];
rx_ring->total_bytes = 0;
rx_ring->total_packets = 0;
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
@@ -1380,7 +1436,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- ring = &(adapter->tx_ring[r_idx]);
+ ring = adapter->tx_ring[r_idx];
ring->total_bytes = 0;
ring->total_packets = 0;
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
@@ -1389,7 +1445,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- ring = &(adapter->rx_ring[r_idx]);
+ ring = adapter->rx_ring[r_idx];
ring->total_bytes = 0;
ring->total_packets = 0;
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
@@ -1420,7 +1476,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
long r_idx;
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring = adapter->rx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_rx_dca(adapter, rx_ring);
@@ -1461,7 +1517,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- ring = &(adapter->tx_ring[r_idx]);
+ ring = adapter->tx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_tx_dca(adapter, ring);
@@ -1477,7 +1533,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
budget = max(budget, 1);
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- ring = &(adapter->rx_ring[r_idx]);
+ ring = adapter->rx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_rx_dca(adapter, ring);
@@ -1488,7 +1544,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
}
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- ring = &(adapter->rx_ring[r_idx]);
+ ring = adapter->rx_ring[r_idx];
/* If all Rx work done, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
@@ -1521,7 +1577,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
long r_idx;
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring = adapter->tx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_tx_dca(adapter, tx_ring);
@@ -1706,8 +1762,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
u8 current_itr;
u32 new_itr = q_vector->eitr;
- struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
- struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
q_vector->tx_itr,
@@ -1763,6 +1819,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
mask |= IXGBE_EIMS_ECC;
mask |= IXGBE_EIMS_GPI_SDP1;
mask |= IXGBE_EIMS_GPI_SDP2;
+ if (adapter->num_vfs)
+ mask |= IXGBE_EIMS_MAILBOX;
}
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -1771,6 +1829,11 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
ixgbe_irq_enable_queues(adapter, ~0);
IXGBE_WRITE_FLUSH(&adapter->hw);
+
+ if (adapter->num_vfs > 32) {
+ u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
+ }
}
/**
@@ -1812,10 +1875,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_fan_failure(adapter, eicr);
if (napi_schedule_prep(&(q_vector->napi))) {
- adapter->tx_ring[0].total_packets = 0;
- adapter->tx_ring[0].total_bytes = 0;
- adapter->rx_ring[0].total_packets = 0;
- adapter->rx_ring[0].total_bytes = 0;
+ adapter->tx_ring[0]->total_packets = 0;
+ adapter->tx_ring[0]->total_bytes = 0;
+ adapter->rx_ring[0]->total_packets = 0;
+ adapter->rx_ring[0]->total_bytes = 0;
/* would disable interrupts here but EIAM disabled it */
__napi_schedule(&(q_vector->napi));
}
@@ -1900,6 +1963,8 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
+ if (adapter->num_vfs > 32)
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
}
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1945,7 +2010,7 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
/* Setup the HW Tx Head and Tail descriptor pointers */
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *ring = &adapter->tx_ring[i];
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
j = ring->reg_idx;
tdba = ring->dma;
tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
@@ -1955,8 +2020,8 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
- adapter->tx_ring[i].head = IXGBE_TDH(j);
- adapter->tx_ring[i].tail = IXGBE_TDT(j);
+ adapter->tx_ring[i]->head = IXGBE_TDH(j);
+ adapter->tx_ring[i]->tail = IXGBE_TDT(j);
/*
* Disable Tx Head Writeback RO bit, since this hoses
* bookkeeping if things aren't delivered in order.
@@ -1984,18 +2049,32 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
if (hw->mac.type == ixgbe_mac_82599EB) {
u32 rttdcs;
+ u32 mask;
/* disable the arbiter while setting MTQC */
rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
rttdcs |= IXGBE_RTTDCS_ARBDIS;
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
- /* We enable 8 traffic classes, DCB only */
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
- IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
- IXGBE_MTQC_8TC_8TQ));
- else
+ /* set transmit pool layout */
+ mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
+ switch (adapter->flags & mask) {
+
+ case (IXGBE_FLAG_SRIOV_ENABLED):
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+ (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
+ break;
+
+ case (IXGBE_FLAG_DCB_ENABLED):
+ /* We enable 8 traffic classes, DCB only */
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+ (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
+ break;
+
+ default:
IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+ break;
+ }
	/* re-enable the arbiter */
rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
@@ -2054,12 +2133,16 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
#ifdef CONFIG_IXGBE_DCB
| IXGBE_FLAG_DCB_ENABLED
#endif
+ | IXGBE_FLAG_SRIOV_ENABLED
);
switch (mask) {
case (IXGBE_FLAG_RSS_ENABLED):
mrqc = IXGBE_MRQC_RSSEN;
break;
+ case (IXGBE_FLAG_SRIOV_ENABLED):
+ mrqc = IXGBE_MRQC_VMDQEN;
+ break;
#ifdef CONFIG_IXGBE_DCB
case (IXGBE_FLAG_DCB_ENABLED):
mrqc = IXGBE_MRQC_RT8TCEN;
@@ -2085,7 +2168,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
u32 rscctrl;
int rx_buf_len;
- rx_ring = &adapter->rx_ring[index];
+ rx_ring = adapter->rx_ring[index];
j = rx_ring->reg_idx;
rx_buf_len = rx_ring->rx_buf_len;
rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
@@ -2140,7 +2223,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
int rx_buf_len;
/* Decide whether to use packet split mode or not */
- adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ /* Do not use packet split if we're in SR-IOV Mode */
+ if (!adapter->num_vfs)
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
/* Set the RX buffer length according to the mode */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -2152,7 +2237,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
IXGBE_PSRTYPE_IPV4HDR |
IXGBE_PSRTYPE_IPV6HDR |
IXGBE_PSRTYPE_L2HDR;
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PSRTYPE(adapter->num_vfs),
+ psrtype);
}
} else {
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
@@ -2179,7 +2266,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
#endif
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
- rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
+ rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc);
/* disable receives while setting up the descriptors */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -2189,7 +2276,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
* the Base and Length of the Rx Descriptor Ring
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_ring = &adapter->rx_ring[i];
+ rx_ring = adapter->rx_ring[i];
rdba = rx_ring->dma;
j = rx_ring->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
@@ -2238,6 +2325,30 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ u32 vt_reg_bits;
+ u32 reg_offset, vf_shift;
+ u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
+ | IXGBE_VT_CTL_REPLEN;
+ vt_reg_bits |= (adapter->num_vfs <<
+ IXGBE_VT_CTL_POOL_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
+
+ vf_shift = adapter->num_vfs % 32;
+ reg_offset = adapter->num_vfs / 32;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
+ /* Enable only the PF's pool for Tx/Rx */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+ ixgbe_set_vmolr(hw, adapter->num_vfs);
+ }
+
/* Program MRQC for the distribution of queues */
mrqc = ixgbe_setup_mrqc(adapter);
@@ -2269,6 +2380,20 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
}
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ if (adapter->num_vfs) {
+ u32 reg;
+
+ /* Map PF MAC address in RAR Entry 0 to first pool
+ * following VFs */
+ hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+
+ /* Set up VF register offsets for selected VT Mode, i.e.
+ * 64 VFs for SR-IOV */
+ reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ reg |= IXGBE_GCR_EXT_SRIOV;
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
+ }
+
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
@@ -2307,15 +2432,17 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ int pool_ndx = adapter->num_vfs;
/* add VID to filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
+ hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
}
static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ int pool_ndx = adapter->num_vfs;
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_disable(adapter);
@@ -2326,7 +2453,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
ixgbe_irq_enable(adapter);
/* remove VID from filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
+ hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
}
static void ixgbe_vlan_rx_register(struct net_device *netdev,
@@ -2356,7 +2483,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
for (i = 0; i < adapter->num_rx_queues; i++) {
u32 ctrl;
- j = adapter->rx_ring[i].reg_idx;
+ j = adapter->rx_ring[i]->reg_idx;
ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
ctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
@@ -2409,7 +2536,7 @@ static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
* responsible for configuring the hardware for proper unicast, multicast and
* promiscuous mode.
**/
-static void ixgbe_set_rx_mode(struct net_device *netdev)
+void ixgbe_set_rx_mode(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -2441,7 +2568,7 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
/* reprogram secondary unicast list */
- hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list);
+ hw->mac.ops.update_uc_addr_list(hw, netdev);
/* reprogram multicast list */
addr_count = netdev->mc_count;
@@ -2449,6 +2576,8 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
addr_list = netdev->mc_list->dmi_addr;
hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
ixgbe_addr_list_itr);
+ if (adapter->num_vfs)
+ ixgbe_restore_vf_multicasts(adapter);
}
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -2517,7 +2646,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
/* PThresh workaround for Tx hang with DFP enabled. */
txdctl |= 32;
@@ -2534,7 +2663,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
for (i = 0; i < adapter->num_rx_queues; i++) {
- j = adapter->rx_ring[i].reg_idx;
+ j = adapter->rx_ring[i]->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@ -2574,7 +2703,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].atr_sample_rate =
+ adapter->tx_ring[i]->atr_sample_rate =
adapter->atr_sample_rate;
ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
@@ -2584,8 +2713,8 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_configure_tx(adapter);
ixgbe_configure_rx(adapter);
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
- (adapter->rx_ring[i].count - 1));
+ ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i],
+ (adapter->rx_ring[i]->count - 1));
}
static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
@@ -2668,7 +2797,7 @@ link_cfg_out:
static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
int rxr)
{
- int j = adapter->rx_ring[rxr].reg_idx;
+ int j = adapter->rx_ring[rxr]->reg_idx;
int k;
for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
@@ -2682,8 +2811,8 @@ static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
"not set within the polling period\n", rxr);
}
- ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
- (adapter->rx_ring[rxr].count - 1));
+ ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
+ (adapter->rx_ring[rxr]->count - 1));
}
static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
@@ -2697,6 +2826,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
u32 txdctl, rxdctl, mhadd;
u32 dmatxctl;
u32 gpie;
+ u32 ctrl_ext;
ixgbe_get_hw_control(adapter);
@@ -2709,6 +2839,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
/* MSI only */
gpie = 0;
}
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ gpie |= IXGBE_GPIE_VTMODE_64;
+ }
/* XXX: to interrupt immediately for EICS writes, enable this */
/* gpie |= IXGBE_GPIE_EIMEN; */
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -2765,7 +2899,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
/* enable WTHRESH=8 descriptors, to encourage burst writeback */
txdctl |= (8 << 16);
@@ -2779,14 +2913,26 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
txdctl |= IXGBE_TXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ int wait_loop = 10;
+ /* poll for Tx Enable ready */
+ do {
+ msleep(1);
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
+ } while (--wait_loop &&
+ !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!wait_loop)
+ DPRINTK(DRV, ERR, "Could not enable "
+ "Tx Queue %d\n", j);
+ }
}
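The 82599 branch above polls TXDCTL until the enable bit latches, giving up after ten 1 ms sleeps. The same wait-for-bit pattern pulled out as a hypothetical helper for clarity (poll_reg_bit_set is not part of the driver; it assumes the IXGBE_READ_REG accessor used throughout this file):

static bool poll_reg_bit_set(struct ixgbe_hw *hw, u32 reg, u32 bit)
{
	int wait_loop = 10;

	do {
		msleep(1);	/* give hardware up to ~10 ms total */
		if (IXGBE_READ_REG(hw, reg) & bit)
			return true;
	} while (--wait_loop);

	return false;	/* bit never latched */
}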
for (i = 0; i < num_rx_rings; i++) {
- j = adapter->rx_ring[i].reg_idx;
+ j = adapter->rx_ring[i]->reg_idx;
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
/* enable PTHRESH=32 descriptors (half the internal cache)
* and HTHRESH=0 descriptors (to minimize latency on fetch),
@@ -2860,7 +3006,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i].reinit_state));
+ &(adapter->tx_ring[i]->reinit_state));
/* enable transmits */
netif_tx_start_all_queues(netdev);
@@ -2870,6 +3016,12 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
adapter->link_check_timeout = jiffies;
mod_timer(&adapter->watchdog_timer, jiffies);
+
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
return 0;
}
@@ -2918,7 +3070,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
}
/* reprogram the RAR[0] in case user changed it. */
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+ IXGBE_RAH_AV);
}
/**
@@ -3024,7 +3177,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+ ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
}
/**
@@ -3036,7 +3189,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+ ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
}
void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -3050,6 +3203,17 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* signal that we are down to the interrupt handler */
set_bit(__IXGBE_DOWN, &adapter->state);
+ /* disable receive for all VFs and wait one second */
+ if (adapter->num_vfs) {
+ for (i = 0 ; i < adapter->num_vfs; i++)
+ adapter->vfinfo[i].clear_to_send = 0;
+
+ /* ping all the active vfs to let them know we are going down */
+ ixgbe_ping_all_vfs(adapter);
+ /* Disable all VFTE/VFRE TX/RX */
+ ixgbe_disable_tx_rx(adapter);
+ }
+
/* disable receives */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -3076,7 +3240,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
(txdctl & ~IXGBE_TXDCTL_ENABLE));
@@ -3116,13 +3280,13 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
- ixgbe_update_tx_dca(adapter, adapter->tx_ring);
- ixgbe_update_rx_dca(adapter, adapter->rx_ring);
+ ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
+ ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
}
#endif
- tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
- ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
+ tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
+ ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
if (!tx_clean_complete)
work_done = budget;
@@ -3286,6 +3450,19 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
}
#endif /* IXGBE_FCOE */
+/**
+ * ixgbe_set_sriov_queues: Allocate queues for IOV use
+ * @adapter: board private structure to initialize
+ *
+ * IOV doesn't actually use anything, so just NAK the
+ * request for now and let the other queue routines
+ * figure out what to do.
+ */
+static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
+{
+ return false;
+}
+
/*
* ixgbe_set_num_queues: Allocate queues for device, feature dependant
* @adapter: board private structure to initialize
@@ -3299,6 +3476,15 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
**/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
+ /* Start with base case */
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_queues_per_pool = 1;
+
+ if (ixgbe_set_sriov_queues(adapter))
+ return;
+
#ifdef IXGBE_FCOE
if (ixgbe_set_fcoe_queues(adapter))
goto done;
@@ -3388,9 +3574,9 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].reg_idx = i;
+ adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].reg_idx = i;
+ adapter->tx_ring[i]->reg_idx = i;
ret = true;
} else {
ret = false;
@@ -3417,8 +3603,8 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
/* the number of queues is assumed to be symmetric */
for (i = 0; i < dcb_i; i++) {
- adapter->rx_ring[i].reg_idx = i << 3;
- adapter->tx_ring[i].reg_idx = i << 2;
+ adapter->rx_ring[i]->reg_idx = i << 3;
+ adapter->tx_ring[i]->reg_idx = i << 2;
}
ret = true;
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
@@ -3436,18 +3622,18 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
* Rx TC0-TC7 are offset by 16 queues each
*/
for (i = 0; i < 3; i++) {
- adapter->tx_ring[i].reg_idx = i << 5;
- adapter->rx_ring[i].reg_idx = i << 4;
+ adapter->tx_ring[i]->reg_idx = i << 5;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
for ( ; i < 5; i++) {
- adapter->tx_ring[i].reg_idx =
+ adapter->tx_ring[i]->reg_idx =
((i + 2) << 4);
- adapter->rx_ring[i].reg_idx = i << 4;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
for ( ; i < dcb_i; i++) {
- adapter->tx_ring[i].reg_idx =
+ adapter->tx_ring[i]->reg_idx =
((i + 8) << 3);
- adapter->rx_ring[i].reg_idx = i << 4;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
ret = true;
@@ -3460,12 +3646,12 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
*
* Rx TC0-TC3 are offset by 32 queues each
*/
- adapter->tx_ring[0].reg_idx = 0;
- adapter->tx_ring[1].reg_idx = 64;
- adapter->tx_ring[2].reg_idx = 96;
- adapter->tx_ring[3].reg_idx = 112;
+ adapter->tx_ring[0]->reg_idx = 0;
+ adapter->tx_ring[1]->reg_idx = 64;
+ adapter->tx_ring[2]->reg_idx = 96;
+ adapter->tx_ring[3]->reg_idx = 112;
for (i = 0 ; i < dcb_i; i++)
- adapter->rx_ring[i].reg_idx = i << 5;
+ adapter->rx_ring[i]->reg_idx = i << 5;
ret = true;
} else {
@@ -3498,9 +3684,9 @@ static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].reg_idx = i;
+ adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].reg_idx = i;
+ adapter->tx_ring[i]->reg_idx = i;
ret = true;
}
@@ -3528,8 +3714,8 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
ixgbe_cache_ring_dcb(adapter);
/* find out queues in TC for FCoE */
- fcoe_rx_i = adapter->rx_ring[fcoe->tc].reg_idx + 1;
- fcoe_tx_i = adapter->tx_ring[fcoe->tc].reg_idx + 1;
+ fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
+ fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
/*
* In 82599, the number of Tx queues for each traffic
* class for both 8-TC and 4-TC modes are:
@@ -3560,8 +3746,8 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
fcoe_tx_i = f->mask;
}
for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
- adapter->rx_ring[f->mask + i].reg_idx = fcoe_rx_i;
- adapter->tx_ring[f->mask + i].reg_idx = fcoe_tx_i;
+ adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
+ adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
}
ret = true;
}
@@ -3570,6 +3756,24 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
/**
+ * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
+ * @adapter: board private structure to initialize
+ *
+ * SR-IOV doesn't use any descriptor rings but changes the default if
+ * no other mapping is used.
+ *
+ */
+static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
+{
+ adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
+ adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
+ if (adapter->num_vfs)
+ return true;
+ else
+ return false;
+}
+
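The reg_idx calculation above appears to assume the 64-pool VT mode set up elsewhere in this patch, where each pool owns two hardware queues, so the PF's first ring sits immediately after the VF pools. Illustrative only (pf_base_queue is a made-up name):

static u32 pf_base_queue(u32 num_vfs)
{
	/* e.g. num_vfs = 8 -> the PF's ring 0 maps to hardware queue 16 */
	return num_vfs * 2;
}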
+/**
* ixgbe_cache_ring_register - Descriptor ring to register mapping
* @adapter: board private structure to initialize
*
@@ -3583,8 +3787,11 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
/* start with default case */
- adapter->rx_ring[0].reg_idx = 0;
- adapter->tx_ring[0].reg_idx = 0;
+ adapter->rx_ring[0]->reg_idx = 0;
+ adapter->tx_ring[0]->reg_idx = 0;
+
+ if (ixgbe_cache_ring_sriov(adapter))
+ return;
#ifdef IXGBE_FCOE
if (ixgbe_cache_ring_fcoe(adapter))
@@ -3614,33 +3821,63 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
int i;
-
- adapter->tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
- if (!adapter->tx_ring)
- goto err_tx_ring_allocation;
-
- adapter->rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
- if (!adapter->rx_ring)
- goto err_rx_ring_allocation;
+ int orig_node = adapter->node;
for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i].count = adapter->tx_ring_count;
- adapter->tx_ring[i].queue_index = i;
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
+ if (orig_node == -1) {
+ int cur_node = next_online_node(adapter->node);
+ if (cur_node == MAX_NUMNODES)
+ cur_node = first_online_node;
+ adapter->node = cur_node;
+ }
+ ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
+ adapter->node);
+ if (!ring)
+ ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+ if (!ring)
+ goto err_tx_ring_allocation;
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = i;
+ ring->numa_node = adapter->node;
+
+ adapter->tx_ring[i] = ring;
}
+ /* Restore the adapter's original node */
+ adapter->node = orig_node;
+
for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i].count = adapter->rx_ring_count;
- adapter->rx_ring[i].queue_index = i;
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+ if (orig_node == -1) {
+ int cur_node = next_online_node(adapter->node);
+ if (cur_node == MAX_NUMNODES)
+ cur_node = first_online_node;
+ adapter->node = cur_node;
+ }
+ ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
+ adapter->node);
+ if (!ring)
+ ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+ if (!ring)
+ goto err_rx_ring_allocation;
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = i;
+ ring->numa_node = adapter->node;
+
+ adapter->rx_ring[i] = ring;
}
+ /* Restore the adapter's original node */
+ adapter->node = orig_node;
+
ixgbe_cache_ring_register(adapter);
return 0;
err_rx_ring_allocation:
- kfree(adapter->tx_ring);
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ kfree(adapter->tx_ring[i]);
err_tx_ring_allocation:
return -ENOMEM;
}
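The allocation loops above prefer memory on the ring's NUMA node and fall back to any node when that fails. The same idiom as a stand-alone sketch (alloc_ring_on_node is hypothetical, not driver code):

static struct ixgbe_ring *alloc_ring_on_node(int node)
{
	struct ixgbe_ring *ring;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring)
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);	/* any node */
	return ring;	/* NULL only if both attempts fail */
}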
@@ -3695,6 +3932,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
adapter->atr_sample_rate = 0;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
+
ixgbe_set_num_queues(adapter);
err = pci_enable_msi(adapter->pdev);
@@ -3736,7 +3976,11 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
}
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
- q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
+ q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
+ GFP_KERNEL, adapter->node);
+ if (!q_vector)
+ q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
+ GFP_KERNEL);
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
@@ -3863,10 +4107,16 @@ err_set_interrupt:
**/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
- adapter->tx_ring = NULL;
- adapter->rx_ring = NULL;
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ kfree(adapter->tx_ring[i]);
+ adapter->tx_ring[i] = NULL;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ kfree(adapter->rx_ring[i]);
+ adapter->rx_ring[i] = NULL;
+ }
ixgbe_free_q_vectors(adapter);
ixgbe_reset_interrupt_capability(adapter);
@@ -4036,6 +4286,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
/* enable rx csum by default */
adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+ /* get assigned NUMA node */
+ adapter->node = dev_to_node(&pdev->dev);
+
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
@@ -4055,7 +4308,9 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
int size;
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vmalloc(size);
+ tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
+ if (!tx_ring->tx_buffer_info)
+ tx_ring->tx_buffer_info = vmalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
memset(tx_ring->tx_buffer_info, 0, size);
@@ -4097,7 +4352,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
- err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
if (!err)
continue;
DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
@@ -4121,7 +4376,9 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vmalloc(size);
+ rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
+ if (!rx_ring->rx_buffer_info)
+ rx_ring->rx_buffer_info = vmalloc(size);
if (!rx_ring->rx_buffer_info) {
DPRINTK(PROBE, ERR,
"vmalloc allocation failed for the rx desc ring\n");
@@ -4167,7 +4424,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
if (!err)
continue;
DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
@@ -4210,8 +4467,8 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- if (adapter->tx_ring[i].desc)
- ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
+ if (adapter->tx_ring[i]->desc)
+ ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
}
/**
@@ -4247,8 +4504,8 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- if (adapter->rx_ring[i].desc)
- ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
+ if (adapter->rx_ring[i]->desc)
+ ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
}
/**
@@ -4373,6 +4630,11 @@ static int ixgbe_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
+ /*
+ * pci_restore_state clears dev->state_saved so call
+ * pci_save_state to restore it.
+ */
+ pci_save_state(pdev);
err = pci_enable_device_mem(pdev);
if (err) {
@@ -4520,8 +4782,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
adapter->hw_rx_no_dma_resources +=
IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
for (i = 0; i < adapter->num_rx_queues; i++) {
- rsc_count += adapter->rx_ring[i].rsc_count;
- rsc_flush += adapter->rx_ring[i].rsc_flush;
+ rsc_count += adapter->rx_ring[i]->rsc_count;
+ rsc_flush += adapter->rx_ring[i]->rsc_flush;
}
adapter->rsc_total_count = rsc_count;
adapter->rsc_total_flush = rsc_flush;
@@ -4529,11 +4791,11 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
/* gather some stats to the adapter struct that are per queue */
for (i = 0; i < adapter->num_tx_queues; i++)
- restart_queue += adapter->tx_ring[i].restart_queue;
+ restart_queue += adapter->tx_ring[i]->restart_queue;
adapter->restart_queue = restart_queue;
for (i = 0; i < adapter->num_rx_queues; i++)
- non_eop_descs += adapter->rx_ring[i].non_eop_descs;
+ non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
adapter->non_eop_descs = non_eop_descs;
adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
@@ -4772,7 +5034,7 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
for (i = 0; i < adapter->num_tx_queues; i++)
set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i].reinit_state));
+ &(adapter->tx_ring[i]->reinit_state));
} else {
DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
"ignored adding FDIR ATR filters \n");
@@ -4781,6 +5043,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
netif_tx_start_all_queues(adapter->netdev);
}
+static DEFINE_MUTEX(ixgbe_watchdog_lock);
+
/**
* ixgbe_watchdog_task - worker thread to bring link up
* @work: pointer to work_struct containing our data
@@ -4792,13 +5056,16 @@ static void ixgbe_watchdog_task(struct work_struct *work)
watchdog_task);
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 link_speed = adapter->link_speed;
- bool link_up = adapter->link_up;
+ u32 link_speed;
+ bool link_up;
int i;
struct ixgbe_ring *tx_ring;
int some_tx_pending = 0;
- adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
+ mutex_lock(&ixgbe_watchdog_lock);
+
+ link_up = adapter->link_up;
+ link_speed = adapter->link_speed;
if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
@@ -4869,7 +5136,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
if (!netif_carrier_ok(netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_ring = &adapter->tx_ring[i];
+ tx_ring = adapter->tx_ring[i];
if (tx_ring->next_to_use != tx_ring->next_to_clean) {
some_tx_pending = 1;
break;
@@ -4887,7 +5154,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
}
ixgbe_update_stats(adapter);
- adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
+ mutex_unlock(&ixgbe_watchdog_lock);
}
static int ixgbe_tso(struct ixgbe_adapter *adapter,
@@ -4918,7 +5185,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
iph->daddr, 0,
IPPROTO_TCP,
0);
- } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+ } else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@@ -5157,19 +5424,19 @@ dma_error:
tx_buffer_info->dma = 0;
tx_buffer_info->time_stamp = 0;
tx_buffer_info->next_to_watch = 0;
- count--;
+ if (count)
+ count--;
/* clear timestamp and dma mappings for remaining portion of packet */
- while (count >= 0) {
- count--;
- i--;
- if (i < 0)
+ while (count--) {
+ if (i == 0)
i += tx_ring->count;
+ i--;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
}
- return count;
+ return 0;
}
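The rewritten error path above walks the descriptor ring backwards, wrapping from index 0 back to count - 1. The wraparound step in isolation (ring_index_prev is a made-up name, shown only to illustrate the arithmetic):

static unsigned int ring_index_prev(unsigned int i, unsigned int count)
{
	if (i == 0)
		i += count;
	return i - 1;
}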
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
@@ -5319,8 +5586,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
struct ixgbe_adapter *adapter = netdev_priv(dev);
int txq = smp_processor_id();
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ while (unlikely(txq >= dev->real_num_tx_queues))
+ txq -= dev->real_num_tx_queues;
return txq;
+ }
#ifdef IXGBE_FCOE
if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
@@ -5368,7 +5638,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
}
}
- tx_ring = &adapter->tx_ring[skb->queue_mapping];
+ tx_ring = adapter->tx_ring[skb->queue_mapping];
if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
(skb->protocol == htons(ETH_P_FCOE))) {
@@ -5474,7 +5744,8 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+ IXGBE_RAH_AV);
return 0;
}
@@ -5566,6 +5837,10 @@ static void ixgbe_netpoll(struct net_device *netdev)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int i;
+ /* if interface is down do nothing */
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ return;
+
adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -5607,6 +5882,61 @@ static const struct net_device_ops ixgbe_netdev_ops = {
#endif /* IXGBE_FCOE */
};
+static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
+ const struct ixgbe_info *ii)
+{
+#ifdef CONFIG_PCI_IOV
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
+ return;
+
+ /* The 82599 supports up to 64 VFs per physical function
+ * but this implementation limits allocation to 63 so that
+ * basic networking resources are still available to the
+ * physical function
+ */
+ adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
+ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
+ err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ if (err) {
+ DPRINTK(PROBE, ERR,
+ "Failed to enable PCI sriov: %d\n", err);
+ goto err_novfs;
+ }
+ /* If call to enable VFs succeeded then allocate memory
+ * for per VF control structures.
+ */
+ adapter->vfinfo =
+ kcalloc(adapter->num_vfs,
+ sizeof(struct vf_data_storage), GFP_KERNEL);
+ if (adapter->vfinfo) {
+ /* Now that we're sure SR-IOV is enabled
+ * and memory allocated set up the mailbox parameters
+ */
+ ixgbe_init_mbx_params_pf(hw);
+ memcpy(&hw->mbx.ops, ii->mbx_ops,
+ sizeof(hw->mbx.ops));
+
+ /* Disable RSC when in SR-IOV mode */
+ adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
+ IXGBE_FLAG2_RSC_ENABLED);
+ return;
+ }
+
+ /* Oh oh */
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate memory for VF "
+ "Data Storage - SRIOV disabled\n");
+ pci_disable_sriov(adapter->pdev);
+
+err_novfs:
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+ adapter->num_vfs = 0;
+#endif /* CONFIG_PCI_IOV */
+}
+
/**
* ixgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
@@ -5781,6 +6111,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_sw_init;
}
+ ixgbe_probe_vf(adapter, ii);
+
netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_HW_VLAN_TX |
@@ -5801,6 +6133,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->vlan_features |= NETIF_F_IPV6_CSUM;
netdev->vlan_features |= NETIF_F_SG;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
+ IXGBE_FLAG_DCB_ENABLED);
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
@@ -5927,6 +6262,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
ixgbe_setup_dca(adapter);
}
#endif
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
+ adapter->num_vfs);
+ for (i = 0; i < adapter->num_vfs; i++)
+ ixgbe_vf_configuration(pdev, (i | 0x10000000));
+ }
+
/* add san mac addr to netdev */
ixgbe_add_sanmac_netdev(netdev);
@@ -5939,6 +6281,8 @@ err_register:
ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
err_eeprom:
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
del_timer_sync(&adapter->sfp_timer);
cancel_work_sync(&adapter->sfp_task);
@@ -6007,6 +6351,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
+
ixgbe_clear_interrupt_scheme(adapter);
ixgbe_release_hw_control(adapter);
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
new file mode 100644
index 000000000000..d75f9148eb1f
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -0,0 +1,479 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "ixgbe_type.h"
+#include "ixgbe_common.h"
+#include "ixgbe_mbx.h"
+
+/**
+ * ixgbe_read_mbx - Reads a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ if (mbx->ops.read)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = 0;
+
+ if (size > mbx->size)
+ ret_val = IXGBE_ERR_MBX;
+
+ else if (mbx->ops.write)
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg - checks to see if someone sent us mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbx->ops.check_for_msg)
+ ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack - checks to see if someone sent us ACK
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbx->ops.check_for_ack)
+ ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst - checks to see if other side has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbx->ops.check_for_rst)
+ ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ if (!countdown || !mbx->ops.check_for_msg)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ udelay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message acknowledgement
+ **/
+static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ if (!countdown || !mbx->ops.check_for_ack)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ udelay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!mbx->ops.read)
+ goto out;
+
+ ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ /* exit if either we can't write or there isn't a defined timeout */
+ if (!mbx->ops.write || !mbx->timeout)
+ goto out;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = ixgbe_poll_for_ack(hw, mbx_id);
+out:
+ return ret_val;
+}
+
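As a usage sketch, a caller holding the PF's hw struct could push one control word to a VF and wait for its ack with the posted-write helper above; this assumes mbx->timeout has been set non-zero, and example_notify_vf0 is a made-up name:

static s32 example_notify_vf0(struct ixgbe_hw *hw)
{
	u32 msg = IXGBE_PF_CONTROL_MSG;	/* defined in ixgbe_mbx.h */

	return ixgbe_write_posted_mbx(hw, &msg, 1, 0);	/* mailbox id 0 = VF 0 */
}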
+/**
+ * ixgbe_init_mbx_ops_generic - Initialize MB function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Setup the mailbox read and write message function pointers
+ **/
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ mbx->ops.read_posted = ixgbe_read_posted_mbx;
+ mbx->ops.write_posted = ixgbe_write_posted_mbx;
+}
+
+static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
+{
+ u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbvficr & mask) {
+ ret_val = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
+ index)) {
+ ret_val = 0;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
+ index)) {
+ ret_val = 0;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst_pf - checks to see if the VF has reset
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ u32 reg_offset = (vf_number < 32) ? 0 : 1;
+ u32 vf_shift = vf_number % 32;
+ u32 vflre = 0;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+
+ if (vflre & (1 << vf_shift)) {
+ ret_val = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ u32 p2v_mailbox;
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
+
+ /* reserve mailbox for vf use */
+ p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
+ if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
+ ret_val = 0;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx_pf - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbe_check_for_msg_pf(hw, vf_number);
+ ixgbe_check_for_ack_pf(hw, vf_number);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer*/
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+ return ret_val;
+
+}
+
+/**
+ * ixgbe_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/
+static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
+
+ /* Acknowledge the message and release buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return;
+
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+}
+
+struct ixgbe_mbx_operations mbx_ops_82599 = {
+ .read = ixgbe_read_mbx_pf,
+ .write = ixgbe_write_mbx_pf,
+ .read_posted = ixgbe_read_posted_mbx,
+ .write_posted = ixgbe_write_posted_mbx,
+ .check_for_msg = ixgbe_check_for_msg_pf,
+ .check_for_ack = ixgbe_check_for_ack_pf,
+ .check_for_rst = ixgbe_check_for_rst_pf,
+};
+
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
new file mode 100644
index 000000000000..be7ab3309ab7
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -0,0 +1,96 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "ixgbe_type.h"
+
+#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX -100
+
+#define IXGBE_VFMAILBOX 0x002FC
+#define IXGBE_VFMBMEM 0x00200
+
+#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
+#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
+
+#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is IXGBE_PF_*.
+ * Message ACK's are the value or'd with 0xF0000000
+ */
+#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET 0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD 3
+
+#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+
+extern struct ixgbe_mbx_operations mbx_ops_82599;
+
+#endif /* _IXGBE_MBX_H_ */
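Under the layout defined above, a mailbox word carries the message type in its low 16 bits, per-message info in bits 23:16, and the ACK/NACK/CTS flags in the top bits. A sketch of how a receiver would pick the word apart (these helpers are illustrative, not part of the header):

static inline u32 mbx_msg_type(u32 word)
{
	return word & 0xFFFF;
}

static inline u32 mbx_msg_info(u32 word)
{
	return (word & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
}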
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 9ecad17522c3..1c1efd386956 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index 9b700f5bf1ed..9cf5f3b4cc5d 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
new file mode 100644
index 000000000000..d4cd20f30199
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -0,0 +1,362 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#ifdef NETIF_F_HW_VLAN_TX
+#include <linux/if_vlan.h>
+#endif
+
+#include "ixgbe.h"
+
+#include "ixgbe_sriov.h"
+
+int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
+ int entries, u16 *hash_list, u32 vf)
+{
+ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+ int i;
+
+ /* only so many hash values supported */
+ entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
+
+ /*
+ * salt away the number of multi cast addresses assigned
+ * to this VF for later use to restore when the PF multi cast
+ * list changes
+ */
+ vfinfo->num_vf_mc_hashes = entries;
+
+ /*
+ * VFs are limited to using the MTA hash table for their multicast
+ * addresses
+ */
+ for (i = 0; i < entries; i++) {
+ vfinfo->vf_mc_hashes[i] = hash_list[i];
+ }
+
+ /* Flush and reset the mta with the new values */
+ ixgbe_set_rx_mode(adapter->netdev);
+
+ return 0;
+}
+
+void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct vf_data_storage *vfinfo;
+ int i, j;
+ u32 vector_bit;
+ u32 vector_reg;
+ u32 mta_reg;
+
+ for (i = 0; i < adapter->num_vfs; i++) {
+ vfinfo = &adapter->vfinfo[i];
+ for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
+ hw->addr_ctrl.mta_in_use++;
+ vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
+ vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
+ mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
+ mta_reg |= (1 << vector_bit);
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+ }
+ }
+}
+
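The restore loop above recomputes each VF hash's slot in the multicast table array: bits 11:5 of the hash select one of 128 MTA registers and bits 4:0 select the bit inside it. The same mapping as a stand-alone sketch (mta_location is a made-up name):

static void mta_location(u16 hash, u32 *vector_reg, u32 *vector_bit)
{
	*vector_reg = (hash >> 5) & 0x7F;	/* which MTA register */
	*vector_bit = hash & 0x1F;		/* which bit within it */
}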
+int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
+{
+ u32 ctrl;
+
+ /* Check if global VLAN already set, if not set it */
+ ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
+ if (!(ctrl & IXGBE_VLNCTRL_VFE)) {
+ /* enable VLAN tag insert/strip */
+ ctrl |= IXGBE_VLNCTRL_VFE;
+ ctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+ }
+
+ return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
+}
+
+
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf)
+{
+ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+ vmolr |= (IXGBE_VMOLR_AUPE |
+ IXGBE_VMOLR_ROMPE |
+ IXGBE_VMOLR_ROPE |
+ IXGBE_VMOLR_BAM);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+}
+
+inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* reset offloads to defaults */
+ ixgbe_set_vmolr(hw, vf);
+
+
+ /* reset multicast table array for vf */
+ adapter->vfinfo[vf].num_vf_mc_hashes = 0;
+
+ /* Flush and reset the mta with the new values */
+ ixgbe_set_rx_mode(adapter->netdev);
+
+ if (adapter->vfinfo[vf].rar > 0) {
+ adapter->hw.mac.ops.clear_rar(&adapter->hw,
+ adapter->vfinfo[vf].rar);
+ adapter->vfinfo[vf].rar = -1;
+ }
+}
+
+int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
+ int vf, unsigned char *mac_addr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ adapter->vfinfo[vf].rar = hw->mac.ops.set_rar(hw, vf + 1, mac_addr,
+ vf, IXGBE_RAH_AV);
+ if (adapter->vfinfo[vf].rar < 0) {
+ DPRINTK(DRV, ERR, "Could not set MAC Filter for VF %d\n", vf);
+ return -1;
+ }
+
+ memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+
+ return 0;
+}
+
+int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
+{
+ unsigned char vf_mac_addr[6];
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ unsigned int vfn = (event_mask & 0x3f);
+
+ bool enable = ((event_mask & 0x10000000U) != 0);
+
+ if (enable) {
+ random_ether_addr(vf_mac_addr);
+ DPRINTK(PROBE, INFO, "IOV: VF %d is enabled "
+ "mac %02X:%02X:%02X:%02X:%02X:%02X\n",
+ vfn,
+ vf_mac_addr[0], vf_mac_addr[1], vf_mac_addr[2],
+ vf_mac_addr[3], vf_mac_addr[4], vf_mac_addr[5]);
+ /*
+ * Store away the VF "permanent" MAC address; the VF will ask
+ * for it later.
+ */
+ memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
+ }
+
+ return 0;
+}
+
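The event_mask decoding above mirrors how ixgbe_probe() packs the word when it calls this function with (i | 0x10000000): the low 6 bits carry the VF number and bit 28 flags enable. Illustrative only (decode_vf_event is a made-up name):

static void decode_vf_event(unsigned int mask, unsigned int *vfn, bool *enable)
{
	*vfn = mask & 0x3f;
	*enable = (mask & 0x10000000U) != 0;
}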
+inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg;
+ u32 reg_offset, vf_shift;
+
+ vf_shift = vf % 32;
+ reg_offset = vf / 32;
+
+ /* enable transmit and receive for vf */
+ reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
+ reg |= (1 << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+ reg |= (1 << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
+
+ ixgbe_vf_reset_event(adapter, vf);
+}
+
+static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
+{
+ u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
+ u32 msgbuf[mbx_size];
+ struct ixgbe_hw *hw = &adapter->hw;
+ s32 retval;
+ int entries;
+ u16 *hash_list;
+ int add, vid;
+
+ retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
+
+ if (retval)
+ printk(KERN_ERR "Error receiving message from VF\n");
+
+ /* this is a message we already processed, do nothing */
+ if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
+ return retval;
+
+ /*
+ * until the vf completes a virtual function reset it should not be
+ * allowed to start any configuration.
+ */
+
+ if (msgbuf[0] == IXGBE_VF_RESET) {
+ unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
+ u8 *addr = (u8 *)(&msgbuf[1]);
+ DPRINTK(PROBE, INFO, "VF Reset msg received from vf %d\n", vf);
+ adapter->vfinfo[vf].clear_to_send = false;
+ ixgbe_vf_reset_msg(adapter, vf);
+ adapter->vfinfo[vf].clear_to_send = true;
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
+ memcpy(addr, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ /*
+ * Piggyback the multicast filter type so VF can compute the
+ * correct vectors
+ */
+ msgbuf[3] = hw->mac.mc_filter_type;
+ ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
+
+ return retval;
+ }
+
+ if (!adapter->vfinfo[vf].clear_to_send) {
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+ ixgbe_write_mbx(hw, msgbuf, 1, vf);
+ return retval;
+ }
+
+ switch ((msgbuf[0] & 0xFFFF)) {
+ case IXGBE_VF_SET_MAC_ADDR:
+ {
+ u8 *new_mac = ((u8 *)(&msgbuf[1]));
+ if (is_valid_ether_addr(new_mac))
+ ixgbe_set_vf_mac(adapter, vf, new_mac);
+ else
+ retval = -1;
+ }
+ break;
+ case IXGBE_VF_SET_MULTICAST:
+ entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+ >> IXGBE_VT_MSGINFO_SHIFT;
+ hash_list = (u16 *)&msgbuf[1];
+ retval = ixgbe_set_vf_multicasts(adapter, entries,
+ hash_list, vf);
+ break;
+ case IXGBE_VF_SET_LPE:
+ WARN_ON((msgbuf[0] & 0xFFFF) == IXGBE_VF_SET_LPE);
+ break;
+ case IXGBE_VF_SET_VLAN:
+ add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+ >> IXGBE_VT_MSGINFO_SHIFT;
+ vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
+ retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
+ break;
+ default:
+ DPRINTK(DRV, ERR, "Unhandled Msg %8.8x\n", msgbuf[0]);
+ retval = IXGBE_ERR_MBX;
+ break;
+ }
+
+ /* notify the VF of the results of what it sent us */
+ if (retval)
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
+
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
+
+ ixgbe_write_mbx(hw, msgbuf, 1, vf);
+
+ return retval;
+}
+
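For the IXGBE_VF_SET_VLAN case above, the request is packed by the VF with the add/remove flag in the MSGINFO bits and the VLAN id in the second word. A sketch of the PF-side decode (decode_set_vlan is a made-up name; it relies on the masks already used in the switch statement):

static void decode_set_vlan(u32 *msgbuf, int *add, int *vid)
{
	*add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	*vid = msgbuf[1] & IXGBE_VLVF_VLANID_MASK;
}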
+static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 msg = IXGBE_VT_MSGTYPE_NACK;
+
+ /* if device isn't clear to send it shouldn't be reading either */
+ if (!adapter->vfinfo[vf].clear_to_send)
+ ixgbe_write_mbx(hw, &msg, 1, vf);
+}
+
+void ixgbe_msg_task(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vf;
+
+ for (vf = 0; vf < adapter->num_vfs; vf++) {
+ /* process any reset requests */
+ if (!ixgbe_check_for_rst(hw, vf))
+ ixgbe_vf_reset_event(adapter, vf);
+
+ /* process any messages pending */
+ if (!ixgbe_check_for_msg(hw, vf))
+ ixgbe_rcv_msg_from_vf(adapter, vf);
+
+ /* process any acks */
+ if (!ixgbe_check_for_ack(hw, vf))
+ ixgbe_rcv_ack_from_vf(adapter, vf);
+ }
+}
+
+void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* disable transmit and receive for all vfs */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
+}
+
+void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 ping;
+ int i;
+
+ for (i = 0 ; i < adapter->num_vfs; i++) {
+ ping = IXGBE_PF_CONTROL_MSG;
+ if (adapter->vfinfo[i].clear_to_send)
+ ping |= IXGBE_VT_MSGTYPE_CTS;
+ ixgbe_write_mbx(hw, &ping, 1, i);
+ }
+}
+
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
new file mode 100644
index 000000000000..51d1106c45a1
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -0,0 +1,47 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_SRIOV_H_
+#define _IXGBE_SRIOV_H_
+
+int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
+ int entries, u16 *hash_list, u32 vf);
+void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
+int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf);
+void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
+void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
+void ixgbe_msg_task(struct ixgbe_adapter *adapter);
+int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
+ int vf, unsigned char *mac_addr);
+int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
+void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
+void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
+void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
+
+#endif /* _IXGBE_SRIOV_H_ */
+
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 84650c6ebe03..0db67c19b2c4 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -30,7 +30,7 @@
#include <linux/types.h>
#include <linux/mdio.h>
-#include <linux/list.h>
+#include <linux/netdevice.h>
/* Vendor ID */
#define IXGBE_INTEL_VENDOR_ID 0x8086
@@ -277,6 +277,7 @@
#define IXGBE_DTXCTL 0x07E00
#define IXGBE_DMATXCTL 0x04A80
+#define IXGBE_PFDTXGSWC 0x08220
#define IXGBE_DTXMXSZRQ 0x08100
#define IXGBE_DTXTCPFLGL 0x04A88
#define IXGBE_DTXTCPFLGH 0x04A8C
@@ -287,6 +288,8 @@
#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
+
+#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
/* Tx DCA Control register : 128 of these (0-127) */
#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
@@ -497,6 +500,7 @@
/* DCB registers */
#define IXGBE_RTRPCS 0x02430
#define IXGBE_RTTDCS 0x04900
+#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
#define IXGBE_RTTPCS 0x0CD00
#define IXGBE_RTRUP2TC 0x03020
#define IXGBE_RTTUP2TC 0x0C800
@@ -730,6 +734,13 @@
#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
#define IXGBE_GCR_CAP_VER2 0x00040000
+#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
+#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
+#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
+#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
+#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
+ IXGBE_GCR_EXT_VT_MODE_64)
+
/* Time Sync Registers */
#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
@@ -1065,6 +1076,8 @@
/* VFRE bitmask */
#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
+#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
/* RDHMPN and TDHMPN bitmasks */
#define IXGBE_RDHMPN_RDICADDR 0x007FF800
#define IXGBE_RDHMPN_RDICRDREQ 0x00800000
@@ -1295,6 +1308,7 @@
/* VLAN pool filtering masks */
#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */
#define IXGBE_VLVF_ENTRIES 64
+#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
@@ -1843,6 +1857,12 @@
#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
+/* SR-IOV specific macros */
+#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
+#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4))
+#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
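+/* the >> 4 above packs 16 VFs into each MBVFICR register */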
+
/* Little Endian defines */
#ifndef __le32
#define __le32 u32
@@ -2385,7 +2405,7 @@ struct ixgbe_mac_operations {
s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
- s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct list_head *);
+ s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
ixgbe_mc_addr_itr);
s32 (*enable_mc)(struct ixgbe_hw *);
@@ -2463,6 +2483,37 @@ struct ixgbe_phy_info {
bool multispeed_fiber;
};
+#include "ixgbe_mbx.h"
+
+struct ixgbe_mbx_operations {
+ s32 (*init_params)(struct ixgbe_hw *hw);
+ s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*check_for_msg)(struct ixgbe_hw *, u16);
+ s32 (*check_for_ack)(struct ixgbe_hw *, u16);
+ s32 (*check_for_rst)(struct ixgbe_hw *, u16);
+};
+
+struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+ struct ixgbe_mbx_operations ops;
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u32 v2p_mailbox;
+ u16 size;
+};
+
struct ixgbe_hw {
u8 __iomem *hw_addr;
void *back;
@@ -2472,6 +2523,7 @@ struct ixgbe_hw {
struct ixgbe_phy_info phy;
struct ixgbe_eeprom_info eeprom;
struct ixgbe_bus_info bus;
+ struct ixgbe_mbx_info mbx;
u16 device_id;
u16 vendor_id;
u16 subsystem_device_id;
@@ -2486,6 +2538,7 @@ struct ixgbe_info {
struct ixgbe_mac_operations *mac_ops;
struct ixgbe_eeprom_operations *eeprom_ops;
struct ixgbe_phy_operations *phy_ops;
+ struct ixgbe_mbx_operations *mbx_ops;
};
diff --git a/drivers/net/ixgbevf/Makefile b/drivers/net/ixgbevf/Makefile
new file mode 100644
index 000000000000..dd4e0d27e8cc
--- /dev/null
+++ b/drivers/net/ixgbevf/Makefile
@@ -0,0 +1,38 @@
+################################################################################
+#
+# Intel 82599 Virtual Function driver
+# Copyright(c) 1999 - 2009 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) 82599 VF ethernet driver
+#
+
+obj-$(CONFIG_IXGBEVF) += ixgbevf.o
+
+ixgbevf-objs := vf.o \
+ mbx.o \
+ ethtool.o \
+ ixgbevf_main.o
+
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
new file mode 100644
index 000000000000..c44fdb05447a
--- /dev/null
+++ b/drivers/net/ixgbevf/defines.h
@@ -0,0 +1,292 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_DEFINES_H_
+#define _IXGBEVF_DEFINES_H_
+
+/* Device IDs */
+#define IXGBE_DEV_ID_82599_VF 0x10ED
+
+#define IXGBE_VF_IRQ_CLEAR_MASK 7
+#define IXGBE_VF_MAX_TX_QUEUES 1
+#define IXGBE_VF_MAX_RX_QUEUES 1
+#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
+
+/* Link speed */
+typedef u32 ixgbe_link_speed;
+#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
+#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
+
+#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
+#define IXGBE_LINKS_UP 0x40000000
+#define IXGBE_LINKS_SPEED 0x20000000
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
+
+/* Interrupt Vector Allocation Registers */
+#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
+
+#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+/* Receive Config masks */
+#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
+#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
+
+/* DCA Control */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+
+/* PSRTYPE bit definitions */
+#define IXGBE_PSRTYPE_TCPHDR 0x00000010
+#define IXGBE_PSRTYPE_UDPHDR 0x00000020
+#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
+#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
+#define IXGBE_PSRTYPE_L2HDR 0x00001000
+
+/* SRRCTL bit definitions */
+#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
+#define IXGBE_SRRCTL_RDMTS_SHIFT 22
+#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
+#define IXGBE_SRRCTL_DROP_EN 0x10000000
+#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
+#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
+
+/* Receive Descriptor bit definitions */
+#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
+#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
+#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
+#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
+#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
+#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
+#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
+#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
+#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
+#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
+#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
+#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
+#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
+#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
+#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
+#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
+#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
+#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */
+#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */
+#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
+#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
+#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
+#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
+#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
+#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
+#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define IXGBE_RXD_PRI_SHIFT 13
+#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
+#define IXGBE_RXD_CFI_SHIFT 12
+
+#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */
+#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */
+#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
+#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
+#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */
+#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
+#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
+#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
+#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
+
+#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
+#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
+#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
+#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
+#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
+#define IXGBE_RXDADV_RSCCNT_SHIFT 17
+#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
+#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
+#define IXGBE_RXDADV_SPH 0x8000
+
+#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXD_ERR_CE | \
+ IXGBE_RXD_ERR_LE | \
+ IXGBE_RXD_ERR_PE | \
+ IXGBE_RXD_ERR_OSE | \
+ IXGBE_RXD_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXDADV_ERR_CE | \
+ IXGBE_RXDADV_ERR_LE | \
+ IXGBE_RXDADV_ERR_PE | \
+ IXGBE_RXDADV_ERR_OSE | \
+ IXGBE_RXDADV_ERR_USE)
+
+#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
+#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
+#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+
+/* Transmit Descriptor - Advanced */
+union ixgbe_adv_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+/* Receive Descriptor - Advanced */
+union ixgbe_adv_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info; /* RSS, Pkt type */
+ __le16 hdr_info; /* Splithdr, hdrlen */
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+/* Context descriptors */
+struct ixgbe_adv_tx_context_desc {
+ __le32 vlan_macip_lens;
+ __le32 seqnum_seed;
+ __le32 type_tucmd_mlhl;
+ __le32 mss_l4len_idx;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
+#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
+#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
+#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
+#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
+#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
+#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
+#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
+#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+
+/* Interrupt register bitmasks */
+
+/* Extended Interrupt Cause Read */
+#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
+#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
+#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+
+/* Extended Interrupt Cause Set */
+#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Set */
+#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Clear */
+#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+#define IXGBE_EIMS_ENABLE_MASK ( \
+ IXGBE_EIMS_RTX_QUEUE | \
+ IXGBE_EIMS_MAILBOX | \
+ IXGBE_EIMS_OTHER)
+
+#define IXGBE_EITR_CNT_WDIS 0x80000000
+
+/* Error Codes */
+#define IXGBE_ERR_INVALID_MAC_ADDR -1
+#define IXGBE_ERR_RESET_FAILED -2
+
+#endif /* _IXGBEVF_DEFINES_H_ */
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
new file mode 100644
index 000000000000..399be0c34c36
--- /dev/null
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -0,0 +1,716 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for ixgbevf */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <linux/if_vlan.h>
+#include <linux/uaccess.h>
+
+#include "ixgbevf.h"
+
+#define IXGBE_ALL_RAR_ENTRIES 16
+
+#ifdef ETHTOOL_GSTATS
+struct ixgbe_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+ int base_stat_offset;
+};
+
+#define IXGBEVF_STAT(m, b) sizeof(((struct ixgbevf_adapter *)0)->m), \
+ offsetof(struct ixgbevf_adapter, m), \
+ offsetof(struct ixgbevf_adapter, b)
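+/*
+ * IXGBEVF_STAT expands to (size, offset, base offset); the base value is
+ * subtracted in ixgbevf_get_ethtool_stats() so each counter is reported
+ * relative to its stored base.
+ */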
+static struct ixgbe_stats ixgbe_gstrings_stats[] = {
+ {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc)},
+ {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc)},
+ {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc)},
+ {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc)},
+ {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base)},
+ {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc)},
+ {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base)},
+ {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base)},
+ {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base)},
+ {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base)},
+};
+
+#define IXGBE_QUEUE_STATS_LEN 0
+#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+
+#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#endif /* ETHTOOL_GSTATS */
+#ifdef ETHTOOL_TEST
+static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)",
+ "Link test (on/offline)"
+};
+#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#endif /* ETHTOOL_TEST */
+
+static int ixgbevf_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = 0;
+ bool link_up;
+
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_DUMMY1;
+ ecmd->port = -1;
+
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+
+ if (link_up) {
+ ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ SPEED_10000 : SPEED_1000;
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ecmd->speed = -1;
+ ecmd->duplex = -1;
+ }
+
+ return 0;
+}
+
+static u32 ixgbevf_get_rx_csum(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
+}
+
+static int ixgbevf_set_rx_csum(struct net_device *netdev, u32 data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ if (data)
+ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+ else
+ adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
+
+ if (netif_running(netdev)) {
+ if (!adapter->dev_closed)
+ ixgbevf_reinit_locked(adapter);
+ } else {
+ ixgbevf_reset(adapter);
+ }
+
+ return 0;
+}
+
+static int ixgbevf_set_tso(struct net_device *netdev, u32 data)
+{
+ if (data) {
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ } else {
+ netif_tx_stop_all_queues(netdev);
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ netif_tx_start_all_queues(netdev);
+ }
+ return 0;
+}
+
+static u32 ixgbevf_get_msglevel(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
+
+static char *ixgbevf_reg_names[] = {
+ "IXGBE_VFCTRL",
+ "IXGBE_VFSTATUS",
+ "IXGBE_VFLINKS",
+ "IXGBE_VFRXMEMWRAP",
+ "IXGBE_VFRTIMER",
+ "IXGBE_VTEICR",
+ "IXGBE_VTEICS",
+ "IXGBE_VTEIMS",
+ "IXGBE_VTEIMC",
+ "IXGBE_VTEIAC",
+ "IXGBE_VTEIAM",
+ "IXGBE_VTEITR",
+ "IXGBE_VTIVAR",
+ "IXGBE_VTIVAR_MISC",
+ "IXGBE_VFRDBAL0",
+ "IXGBE_VFRDBAL1",
+ "IXGBE_VFRDBAH0",
+ "IXGBE_VFRDBAH1",
+ "IXGBE_VFRDLEN0",
+ "IXGBE_VFRDLEN1",
+ "IXGBE_VFRDH0",
+ "IXGBE_VFRDH1",
+ "IXGBE_VFRDT0",
+ "IXGBE_VFRDT1",
+ "IXGBE_VFRXDCTL0",
+ "IXGBE_VFRXDCTL1",
+ "IXGBE_VFSRRCTL0",
+ "IXGBE_VFSRRCTL1",
+ "IXGBE_VFPSRTYPE",
+ "IXGBE_VFTDBAL0",
+ "IXGBE_VFTDBAL1",
+ "IXGBE_VFTDBAH0",
+ "IXGBE_VFTDBAH1",
+ "IXGBE_VFTDLEN0",
+ "IXGBE_VFTDLEN1",
+ "IXGBE_VFTDH0",
+ "IXGBE_VFTDH1",
+ "IXGBE_VFTDT0",
+ "IXGBE_VFTDT1",
+ "IXGBE_VFTXDCTL0",
+ "IXGBE_VFTXDCTL1",
+ "IXGBE_VFTDWBAL0",
+ "IXGBE_VFTDWBAL1",
+ "IXGBE_VFTDWBAH0",
+ "IXGBE_VFTDWBAH1"
+};
+
+
+static int ixgbevf_get_regs_len(struct net_device *netdev)
+{
+ return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
+}
+
+static void ixgbevf_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs,
+ void *p)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u32 regs_len = ixgbevf_get_regs_len(netdev);
+ u8 i;
+
+ memset(p, 0, regs_len);
+
+ regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+
+ /* General Registers */
+ regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
+ regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
+ regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
+ regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFRTIMER);
+
+ /* Interrupt */
+ /* don't read EICR because it can clear interrupt causes, instead
+ * read EICS which is a shadow but doesn't clear EICR */
+ regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+ regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
+ regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
+ regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
+ regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
+ regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
+ regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+
+ /* Receive DMA */
+ for (i = 0; i < 2; i++)
+ regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
+
+ /* Receive */
+ regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);
+
+ /* Transmit */
+ for (i = 0; i < 2; i++)
+ regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
+
+ for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
+ hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
+}
+
+static void ixgbevf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, ixgbevf_driver_name, 32);
+ strlcpy(drvinfo->version, ixgbevf_driver_version, 32);
+
+ strlcpy(drvinfo->fw_version, "N/A", 4);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+}
+
+static void ixgbevf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring = adapter->tx_ring;
+ struct ixgbevf_ring *rx_ring = adapter->rx_ring;
+
+ ring->rx_max_pending = IXGBEVF_MAX_RXD;
+ ring->tx_max_pending = IXGBEVF_MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rx_ring->count;
+ ring->tx_pending = tx_ring->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int ixgbevf_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
+ int i, err;
+ u32 new_rx_count, new_tx_count;
+ bool need_tx_update = false;
+ bool need_rx_update = false;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
+ new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
+ new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ if ((new_tx_count == adapter->tx_ring->count) &&
+ (new_rx_count == adapter->rx_ring->count)) {
+ /* nothing to do */
+ return 0;
+ }
+
+ while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
+ msleep(1);
+
+ if (new_tx_count != adapter->tx_ring_count) {
+ tx_ring = kcalloc(adapter->num_tx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!tx_ring) {
+ err = -ENOMEM;
+ goto err_setup;
+ }
+ memcpy(tx_ring, adapter->tx_ring,
+ adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tx_ring[i].count = new_tx_count;
+ err = ixgbevf_setup_tx_resources(adapter,
+ &tx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_tx_resources(adapter,
+ &tx_ring[i]);
+ }
+ kfree(tx_ring);
+ goto err_setup;
+ }
+ tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
+ }
+ need_tx_update = true;
+ }
+
+ if (new_rx_count != adapter->rx_ring_count) {
+ rx_ring = kcalloc(adapter->num_rx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!rx_ring) {
+ err = -ENOMEM;
+ goto err_rx_setup;
+ }
+ memcpy(rx_ring, adapter->rx_ring,
+ adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rx_ring[i].count = new_rx_count;
+ err = ixgbevf_setup_rx_resources(adapter,
+ &rx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_rx_resources(adapter,
+ &rx_ring[i]);
+ }
+ kfree(rx_ring);
+ goto err_rx_setup;
+ }
+ rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
+ }
+ need_rx_update = true;
+ }
+
+err_rx_setup:
+ /* if rings need to be updated, here's the place to do it in one shot */
+ if (need_tx_update || need_rx_update) {
+ if (netif_running(netdev))
+ ixgbevf_down(adapter);
+ }
+
+ /* tx */
+ if (need_tx_update) {
+ kfree(adapter->tx_ring);
+ adapter->tx_ring = tx_ring;
+ tx_ring = NULL;
+ adapter->tx_ring_count = new_tx_count;
+ }
+
+ /* rx */
+ if (need_rx_update) {
+ kfree(adapter->rx_ring);
+ adapter->rx_ring = rx_ring;
+ rx_ring = NULL;
+ adapter->rx_ring_count = new_rx_count;
+ }
+
+ /* success! */
+ err = 0;
+ if (netif_running(netdev))
+ ixgbevf_up(adapter);
+
+err_setup:
+ clear_bit(__IXGBEVF_RESETTING, &adapter->state);
+ return err;
+}
+
+static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
+{
+ switch (stringset) {
+ case ETH_SS_TEST:
+ return IXGBE_TEST_LEN;
+ case ETH_SS_STATS:
+ return IXGBE_GLOBAL_STATS_LEN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ ixgbevf_update_stats(adapter);
+ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+ char *p = (char *)adapter +
+ ixgbe_gstrings_stats[i].stat_offset;
+ char *b = (char *)adapter +
+ ixgbe_gstrings_stats[i].base_stat_offset;
+ data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
+ ((ixgbe_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)b : *(u32 *)b);
+ }
+}
+
+static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ char *p = (char *)data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *ixgbe_gstrings_test,
+ IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool link_up;
+ u32 link_speed = 0;
+ *data = 0;
+
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
+ if (!link_up)
+ *data = 1;
+
+ return *data;
+}
+
+/* ethtool register test data */
+struct ixgbevf_reg_test {
+ u16 reg;
+ u8 array_len;
+ u8 test_type;
+ u32 mask;
+ u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x40 bytes apart, or in contiguous tables. We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define WRITE_NO_TEST 3
+#define TABLE32_TEST 4
+#define TABLE64_TEST_LO 5
+#define TABLE64_TEST_HI 6
+
+/* default VF register test */
+static struct ixgbevf_reg_test reg_test_vf[] = {
+ { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+ { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
+ { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
+ { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+ { 0, 0, 0, 0 }
+};
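+/*
+ * Each entry with array_len 2 exercises both copies of the per-queue
+ * register; ixgbevf_reg_test() below steps through arrays at a stride of
+ * 0x40 bytes per element.
+ */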
+
+#define REG_PATTERN_TEST(R, M, W) \
+{ \
+ u32 pat, val, before; \
+ const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
+ for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
+ before = readl(adapter->hw.hw_addr + R); \
+ writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
+ val = readl(adapter->hw.hw_addr + R); \
+ if (val != (_test[pat] & W & M)) { \
+ hw_dbg(&adapter->hw, \
+ "pattern test reg %04X failed: got " \
+ "0x%08X expected 0x%08X\n", \
+ R, val, (_test[pat] & W & M)); \
+ *data = R; \
+ writel(before, adapter->hw.hw_addr + R); \
+ return 1; \
+ } \
+ writel(before, adapter->hw.hw_addr + R); \
+ } \
+}
+
+#define REG_SET_AND_CHECK(R, M, W) \
+{ \
+ u32 val, before; \
+ before = readl(adapter->hw.hw_addr + R); \
+ writel((W & M), (adapter->hw.hw_addr + R)); \
+ val = readl(adapter->hw.hw_addr + R); \
+ if ((W & M) != (val & M)) { \
+ printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
+ "expected 0x%08X\n", R, (val & M), (W & M)); \
+ *data = R; \
+ writel(before, (adapter->hw.hw_addr + R)); \
+ return 1; \
+ } \
+ writel(before, (adapter->hw.hw_addr + R)); \
+}
+
+static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
+{
+ struct ixgbevf_reg_test *test;
+ u32 i;
+
+ test = reg_test_vf;
+
+ /*
+ * Perform the register test, looping through the test table
+ * until we either fail or reach the null entry.
+ */
+ while (test->reg) {
+ for (i = 0; i < test->array_len; i++) {
+ switch (test->test_type) {
+ case PATTERN_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case SET_READ_TEST:
+ REG_SET_AND_CHECK(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case WRITE_NO_TEST:
+ writel(test->write,
+ (adapter->hw.hw_addr + test->reg)
+ + (i * 0x40));
+ break;
+ case TABLE32_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 4),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_LO:
+ REG_PATTERN_TEST(test->reg + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_HI:
+ REG_PATTERN_TEST((test->reg + 4) + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ }
+ }
+ test++;
+ }
+
+ *data = 0;
+ return *data;
+}
+
+static void ixgbevf_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ bool if_running = netif_running(netdev);
+
+ set_bit(__IXGBEVF_TESTING, &adapter->state);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ hw_dbg(&adapter->hw, "offline testing starting\n");
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result */
+ if (ixgbevf_link_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ ixgbevf_reset(adapter);
+
+ hw_dbg(&adapter->hw, "register testing starting\n");
+ if (ixgbevf_reg_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbevf_reset(adapter);
+
+ clear_bit(__IXGBEVF_TESTING, &adapter->state);
+ if (if_running)
+ dev_open(netdev);
+ } else {
+ hw_dbg(&adapter->hw, "online testing starting\n");
+ /* Online tests */
+ if (ixgbevf_link_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* the register test is not run online; report pass by default */
+ data[0] = 0;
+
+ clear_bit(__IXGBEVF_TESTING, &adapter->state);
+ }
+ msleep_interruptible(4 * 1000);
+}
+
+static int ixgbevf_nway_reset(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev)) {
+ if (!adapter->dev_closed)
+ ixgbevf_reinit_locked(adapter);
+ }
+
+ return 0;
+}
+
+static struct ethtool_ops ixgbevf_ethtool_ops = {
+ .get_settings = ixgbevf_get_settings,
+ .get_drvinfo = ixgbevf_get_drvinfo,
+ .get_regs_len = ixgbevf_get_regs_len,
+ .get_regs = ixgbevf_get_regs,
+ .nway_reset = ixgbevf_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = ixgbevf_get_ringparam,
+ .set_ringparam = ixgbevf_set_ringparam,
+ .get_rx_csum = ixgbevf_get_rx_csum,
+ .set_rx_csum = ixgbevf_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_msglevel = ixgbevf_get_msglevel,
+ .set_msglevel = ixgbevf_set_msglevel,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ixgbevf_set_tso,
+ .self_test = ixgbevf_diag_test,
+ .get_sset_count = ixgbevf_get_sset_count,
+ .get_strings = ixgbevf_get_strings,
+ .get_ethtool_stats = ixgbevf_get_ethtool_stats,
+};
+
+void ixgbevf_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
+}
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
new file mode 100644
index 000000000000..f7015efbff05
--- /dev/null
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -0,0 +1,318 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_H_
+#define _IXGBEVF_H_
+
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+
+#include "vf.h"
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct ixgbevf_tx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ unsigned long time_stamp;
+ u16 length;
+ u16 next_to_watch;
+ u16 mapped_as_page;
+};
+
+struct ixgbevf_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ dma_addr_t page_dma;
+ unsigned int page_offset;
+};
+
+struct ixgbevf_ring {
+ struct ixgbevf_adapter *adapter; /* backlink */
+ void *desc; /* descriptor ring memory */
+ dma_addr_t dma; /* phys. address of descriptor ring */
+ unsigned int size; /* length in bytes */
+ unsigned int count; /* number of descriptors */
+ unsigned int next_to_use;
+ unsigned int next_to_clean;
+
+ int queue_index; /* needed for multiqueue queue management */
+ union {
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ struct ixgbevf_rx_buffer *rx_buffer_info;
+ };
+
+ u16 head;
+ u16 tail;
+
+ unsigned int total_bytes;
+ unsigned int total_packets;
+
+ u16 reg_idx; /* holds the special value that gets the hardware register
+ * offset associated with this ring, which is different
+ * for DCB and RSS modes */
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ /* cpu for tx queue */
+ int cpu;
+#endif
+
+ u64 v_idx; /* maps directly to the index for this ring in the hardware
+ * vector array, can also be used for finding the bit in EICR
+ * and friends that represents the vector for this ring */
+
+ u16 work_limit; /* max work per interrupt */
+ u16 rx_buf_len;
+};
+
+enum ixgbevf_ring_f_enum {
+ RING_F_NONE = 0,
+ RING_F_ARRAY_SIZE /* must be last in enum set */
+};
+
+struct ixgbevf_ring_feature {
+ int indices;
+ int mask;
+};
+
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define MAX_RX_QUEUES 1
+#define MAX_TX_QUEUES 1
+
+#define IXGBEVF_DEFAULT_TXD 1024
+#define IXGBEVF_DEFAULT_RXD 512
+#define IXGBEVF_MAX_TXD 4096
+#define IXGBEVF_MIN_TXD 64
+#define IXGBEVF_MAX_RXD 4096
+#define IXGBEVF_MIN_RXD 64
+
+/* Supported Rx Buffer Sizes */
+#define IXGBEVF_RXBUFFER_64 64 /* Used for packet split */
+#define IXGBEVF_RXBUFFER_128 128 /* Used for packet split */
+#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
+#define IXGBEVF_RXBUFFER_2048 2048
+#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
+
+#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define IXGBE_TX_FLAGS_CSUM (u32)(1)
+#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
+#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
+#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
+#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4)
+#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5)
+#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
+#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct ixgbevf_q_vector {
+ struct ixgbevf_adapter *adapter;
+ struct napi_struct napi;
+ DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
+ DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
+ u8 rxr_count; /* Rx ring count assigned to this vector */
+ u8 txr_count; /* Tx ring count assigned to this vector */
+ u8 tx_itr;
+ u8 rx_itr;
+ u32 eitr;
+ int v_idx; /* vector index in list */
+};
+
+/* Helper macros to switch between ints/sec and what the register uses.
+ * And yes, it's the same math going both ways. The lowest value
+ * supported by all of the ixgbe hardware is 8.
+ */
+#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
+ ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
+#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
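+/*
+ * For reference: 8000 ints/sec -> 1000000000 / (8000 * 256) ~= 488, and
+ * feeding 488 back through the same formula yields roughly 8000 again,
+ * which is why a single macro serves both conversions.
+ */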
+
+#define IXGBE_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
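+/*
+ * Example: count = 512, next_to_use = 10, next_to_clean = 4 gives
+ * 512 + 4 - 10 - 1 = 505 unused descriptors; the -1 keeps one slot in
+ * reserve so a full ring is never mistaken for an empty one.
+ */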
+
+#define IXGBE_RX_DESC_ADV(R, i) \
+ (&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
+#define IXGBE_TX_DESC_ADV(R, i) \
+ (&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
+#define IXGBE_TX_CTXTDESC_ADV(R, i) \
+ (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+
+#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
+
+#define OTHER_VECTOR 1
+#define NON_Q_VECTORS (OTHER_VECTOR)
+
+#define MAX_MSIX_Q_VECTORS 2
+#define MAX_MSIX_COUNT 2
+
+#define MIN_MSIX_Q_VECTORS 2
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+
+/* board specific private data structure */
+struct ixgbevf_adapter {
+ struct timer_list watchdog_timer;
+#ifdef NETIF_F_HW_VLAN_TX
+ struct vlan_group *vlgrp;
+#endif
+ u16 bd_number;
+ struct work_struct reset_task;
+ struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+ char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
+
+ /* Interrupt Throttle Rate */
+ u32 itr_setting;
+ u16 eitr_low;
+ u16 eitr_high;
+
+ /* TX */
+ struct ixgbevf_ring *tx_ring; /* One per active queue */
+ int num_tx_queues;
+ u64 restart_queue;
+ u64 hw_csum_tx_good;
+ u64 lsc_int;
+ u64 hw_tso_ctxt;
+ u64 hw_tso6_ctxt;
+ u32 tx_timeout_count;
+ bool detect_tx_hung;
+
+ /* RX */
+ struct ixgbevf_ring *rx_ring; /* One per active queue */
+ int num_rx_queues;
+ int num_rx_pools; /* == num_rx_queues in 82598 */
+ int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
+ u64 hw_csum_rx_error;
+ u64 hw_rx_no_dma_resources;
+ u64 hw_csum_rx_good;
+ u64 non_eop_descs;
+ int num_msix_vectors;
+ int max_msix_q_vectors; /* true count of q_vectors for device */
+ struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE];
+ struct msix_entry *msix_entries;
+
+ u64 rx_hdr_split;
+ u32 alloc_rx_page_failed;
+ u32 alloc_rx_buff_failed;
+
+ /* Some features need tri-state capability,
+ * thus the additional *_CAPABLE flags.
+ */
+ u32 flags;
+#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
+#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
+#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
+#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
+#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5)
+#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 6)
+#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
+#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 8)
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct net_device_stats net_stats;
+
+ /* structs defined in ixgbe_vf.h */
+ struct ixgbe_hw hw;
+ u16 msg_enable;
+ struct ixgbevf_hw_stats stats;
+ u64 zero_base;
+ /* Interrupt Throttle Rate */
+ u32 eitr_param;
+
+ unsigned long state;
+ u32 *config_space;
+ u64 tx_busy;
+ unsigned int tx_ring_count;
+ unsigned int rx_ring_count;
+
+ u32 link_speed;
+ bool link_up;
+ unsigned long link_check_timeout;
+
+ struct work_struct watchdog_task;
+ bool netdev_registered;
+ bool dev_closed;
+};
+
+enum ixgbevf_state_t {
+ __IXGBEVF_TESTING,
+ __IXGBEVF_RESETTING,
+ __IXGBEVF_DOWN
+};
+
+enum ixgbevf_boards {
+ board_82599_vf,
+};
+
+extern struct ixgbevf_info ixgbevf_vf_info;
+extern struct ixgbe_mac_operations ixgbevf_mbx_ops;
+
+/* needed by ethtool.c */
+extern char ixgbevf_driver_name[];
+extern const char ixgbevf_driver_version[];
+
+extern int ixgbevf_up(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
+extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
+
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *ifr);
+
+#endif
+extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
+extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
+
+#ifdef DEBUG
+extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
+#define hw_dbg(hw, format, arg...) \
+ printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
+#else
+#define hw_dbg(hw, format, arg...) do {} while (0)
+#endif
+
+#endif /* _IXGBEVF_H_ */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
new file mode 100644
index 000000000000..b9f10d05049d
--- /dev/null
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -0,0 +1,3578 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/******************************************************************************
+ Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
+******************************************************************************/
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+#include "ixgbevf.h"
+
+char ixgbevf_driver_name[] = "ixgbevf";
+static const char ixgbevf_driver_string[] =
+ "Intel(R) 82599 Virtual Function";
+
+#define DRV_VERSION "1.0.0-k0"
+const char ixgbevf_driver_version[] = DRV_VERSION;
+static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
+
+static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
+ [board_82599_vf] = &ixgbevf_vf_info,
+};
+
+/* ixgbevf_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static struct pci_device_id ixgbevf_pci_tbl[] = {
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
+ board_82599_vf},
+
+ /* required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+
+/* forward decls */
+static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
+static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
+ u32 itr_reg);
+
+static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
+ struct ixgbevf_ring *rx_ring,
+ u32 val)
+{
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
+}
+
+/**
+ * ixgbevf_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * @adapter: pointer to adapter struct
+ * @direction: 0 for Rx, 1 for Tx, -1 for other causes
+ * @queue: queue to map the corresponding interrupt to
+ * @msix_vector: the vector to map to the corresponding queue
+ *
+ */
+static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
+ u8 queue, u8 msix_vector)
+{
+ u32 ivar, index;
+ struct ixgbe_hw *hw = &adapter->hw;
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+ ivar &= ~0xFF;
+ ivar |= msix_vector;
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
+ } else {
+ /* tx or rx causes */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ index = ((16 * (queue & 1)) + (8 * direction));
+ ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
+ ivar &= ~(0xFF << index);
+ ivar |= (msix_vector << index);
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
+ }
+}
+
+static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_tx_buffer
+ *tx_buffer_info)
+{
+ if (tx_buffer_info->dma) {
+ if (tx_buffer_info->mapped_as_page)
+ pci_unmap_page(adapter->pdev,
+ tx_buffer_info->dma,
+ tx_buffer_info->length,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_single(adapter->pdev,
+ tx_buffer_info->dma,
+ tx_buffer_info->length,
+ PCI_DMA_TODEVICE);
+ tx_buffer_info->dma = 0;
+ }
+ if (tx_buffer_info->skb) {
+ dev_kfree_skb_any(tx_buffer_info->skb);
+ tx_buffer_info->skb = NULL;
+ }
+ tx_buffer_info->time_stamp = 0;
+ /* tx_buffer_info must be completely set up in the transmit path */
+}
+
+static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ unsigned int eop)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 head, tail;
+
+ /* Detect a transmit hang in hardware; this serializes the
+ * check with the clearing of time_stamp and movement of eop */
+ head = readl(hw->hw_addr + tx_ring->head);
+ tail = readl(hw->hw_addr + tx_ring->tail);
+ adapter->detect_tx_hung = false;
+ if ((head != tail) &&
+ tx_ring->tx_buffer_info[eop].time_stamp &&
+ time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
+ /* detected Tx unit hang */
+ union ixgbe_adv_tx_desc *tx_desc;
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ printk(KERN_ERR "Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+ " TDH, TDT <%x>, <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "tx_buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->queue_index,
+ head, tail,
+ tx_ring->next_to_use, eop,
+ tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+ return true;
+ }
+
+ return false;
+}
+
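+/* a single advanced Tx descriptor can carry at most 16KB (1 << 14 bytes) of data */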
+#define IXGBE_MAX_TXD_PWR 14
+#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
+ (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
+#ifdef MAX_SKB_FRAGS
+#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
+ MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
+#else
+#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
+#endif
+
+static void ixgbevf_tx_timeout(struct net_device *netdev);
+
+/**
+ * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ * @tx_ring: tx ring to clean
+ **/
+static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ unsigned int i, eop, count = 0;
+ unsigned int total_bytes = 0, total_packets = 0;
+
+ i = tx_ring->next_to_clean;
+ eop = tx_ring->tx_buffer_info[i].next_to_watch;
+ eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+
+ while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
+ (count < tx_ring->work_limit)) {
+ bool cleaned = false;
+ for ( ; !cleaned; count++) {
+ struct sk_buff *skb;
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ cleaned = (i == eop);
+ skb = tx_buffer_info->skb;
+
+ if (cleaned && skb) {
+ unsigned int segs, bytecount;
+
+ /* gso_segs is currently only valid for tcp */
+ segs = skb_shinfo(skb)->gso_segs ?: 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) +
+ skb->len;
+ total_packets += segs;
+ total_bytes += bytecount;
+ }
+
+ ixgbevf_unmap_and_free_tx_resource(adapter,
+ tx_buffer_info);
+
+ tx_desc->wb.status = 0;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+ eop = tx_ring->tx_buffer_info[i].next_to_watch;
+ eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ }
+
+ tx_ring->next_to_clean = i;
+
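+/* restart the queue once enough descriptors are free for two worst-case frames */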
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(count && netif_carrier_ok(netdev) &&
+ (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+#ifdef HAVE_TX_MQ
+ if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
+ !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+ netif_wake_subqueue(netdev, tx_ring->queue_index);
+ ++adapter->restart_queue;
+ }
+#else
+ if (netif_queue_stopped(netdev) &&
+ !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+ netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ }
+#endif
+ }
+
+ if (adapter->detect_tx_hung) {
+ if (ixgbevf_check_tx_hang(adapter, tx_ring, i)) {
+ /* schedule immediate reset if we believe we hung */
+ printk(KERN_INFO
+ "tx hang %d detected, resetting adapter\n",
+ adapter->tx_timeout_count + 1);
+ ixgbevf_tx_timeout(adapter->netdev);
+ }
+ }
+
+ /* re-arm the interrupt */
+ if ((count >= tx_ring->work_limit) &&
+ (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
+ }
+
+ tx_ring->total_bytes += total_bytes;
+ tx_ring->total_packets += total_packets;
+
+ adapter->net_stats.tx_bytes += total_bytes;
+ adapter->net_stats.tx_packets += total_packets;
+
+ return (count < tx_ring->work_limit);
+}
+
+/**
+ * ixgbevf_receive_skb - Send a completed packet up the stack
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @status: hardware indication of status of receive
+ * @ring: rx descriptor ring (for a specific queue) the packet was received on
+ * @rx_desc: rx descriptor
+ **/
+static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
+ struct sk_buff *skb, u8 status,
+ struct ixgbevf_ring *ring,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ bool is_vlan = (status & IXGBE_RXD_STAT_VP);
+ u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
+ int ret;
+
+ if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
+ if (adapter->vlgrp && is_vlan)
+ vlan_gro_receive(&q_vector->napi,
+ adapter->vlgrp,
+ tag, skb);
+ else
+ napi_gro_receive(&q_vector->napi, skb);
+ } else {
+ if (adapter->vlgrp && is_vlan)
+ ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+ else
+ ret = netif_rx(skb);
+ }
+}
+
+/**
+ * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @adapter: address of board private structure
+ * @status_err: hardware indication of status of receive
+ * @skb: skb currently being received and modified
+ **/
+static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
+ u32 status_err, struct sk_buff *skb)
+{
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Rx csum disabled */
+ if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+ return;
+
+ /* if IP and error */
+ if ((status_err & IXGBE_RXD_STAT_IPCS) &&
+ (status_err & IXGBE_RXDADV_ERR_IPE)) {
+ adapter->hw_csum_rx_error++;
+ return;
+ }
+
+ if (!(status_err & IXGBE_RXD_STAT_L4CS))
+ return;
+
+ if (status_err & IXGBE_RXDADV_ERR_TCPE) {
+ adapter->hw_csum_rx_error++;
+ return;
+ }
+
+ /* It must be a TCP or UDP packet with a valid checksum */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ adapter->hw_csum_rx_good++;
+}
+
+/**
+ * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @adapter: address of board private structure
+ * @rx_ring: rx ring to replenish
+ * @cleaned_count: number of descriptors to refill
+ **/
+static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring,
+ int cleaned_count)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbevf_rx_buffer *bi;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
+
+ i = rx_ring->next_to_use;
+ bi = &rx_ring->rx_buffer_info[i];
+
+ while (cleaned_count--) {
+ rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+
+ if (!bi->page_dma &&
+ (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
+ if (!bi->page) {
+ bi->page = netdev_alloc_page(adapter->netdev);
+ if (!bi->page) {
+ adapter->alloc_rx_page_failed++;
+ goto no_buffers;
+ }
+ bi->page_offset = 0;
+ } else {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= (PAGE_SIZE / 2);
+ }
+
+ bi->page_dma = pci_map_page(pdev, bi->page,
+ bi->page_offset,
+ (PAGE_SIZE / 2),
+ PCI_DMA_FROMDEVICE);
+ }
+
+ skb = bi->skb;
+ if (!skb) {
+ skb = netdev_alloc_skb(adapter->netdev,
+ bufsz);
+
+ if (!skb) {
+ adapter->alloc_rx_buff_failed++;
+ goto no_buffers;
+ }
+
+ /*
+ * Make buffer alignment 2 beyond a 16 byte boundary;
+ * this will result in a 16 byte aligned IP header after
+ * the 14 byte MAC header is removed
+ */
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ bi->skb = skb;
+ }
+ if (!bi->dma) {
+ bi->dma = pci_map_single(pdev, skb->data,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
+ }
+ /* Refresh the desc even if buffer_addrs didn't change because
+ * each write-back erases this info. */
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+ } else {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ bi = &rx_ring->rx_buffer_info[i];
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
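+ /* the hardware tail must point at the last descriptor actually
+ * written, i.e. one position behind next_to_use (with wrap) */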
+ if (i-- == 0)
+ i = (rx_ring->count - 1);
+
+ ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
+ }
+}
+
+static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
+ u64 qmask)
+{
+ u32 mask;
+ struct ixgbe_hw *hw = &adapter->hw;
+
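+ /* the VF's VTEIMS register is 32 bits wide, so only the low
+ * dword of the queue mask is meaningful */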
+ mask = (qmask & 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+}
+
+static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
+}
+
+static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+}
+
+static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
+ struct ixgbevf_ring *rx_ring,
+ int *work_done, int work_to_do)
+{
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct pci_dev *pdev = adapter->pdev;
+ union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
+ struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
+ struct sk_buff *skb;
+ unsigned int i;
+ u32 len, staterr;
+ u16 hdr_info;
+ bool cleaned = false;
+ int cleaned_count = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
+ while (staterr & IXGBE_RXD_STAT_DD) {
+ u32 upper_len = 0;
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
+ len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+ IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+ if (hdr_info & IXGBE_RXDADV_SPH)
+ adapter->rx_hdr_split++;
+ if (len > IXGBEVF_RX_HDR_SIZE)
+ len = IXGBEVF_RX_HDR_SIZE;
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+ } else {
+ len = le16_to_cpu(rx_desc->wb.upper.length);
+ }
+ cleaned = true;
+ skb = rx_buffer_info->skb;
+ prefetch(skb->data - NET_IP_ALIGN);
+ rx_buffer_info->skb = NULL;
+
+ if (rx_buffer_info->dma) {
+ pci_unmap_single(pdev, rx_buffer_info->dma,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
+ rx_buffer_info->dma = 0;
+ skb_put(skb, len);
+ }
+
+ if (upper_len) {
+ pci_unmap_page(pdev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ rx_buffer_info->page_dma = 0;
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buffer_info->page,
+ rx_buffer_info->page_offset,
+ upper_len);
+
+ if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
+ (page_count(rx_buffer_info->page) != 1))
+ rx_buffer_info->page = NULL;
+ else
+ get_page(rx_buffer_info->page);
+
+ skb->len += upper_len;
+ skb->data_len += upper_len;
+ skb->truesize += upper_len;
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+
+ next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ prefetch(next_rxd);
+ cleaned_count++;
+
+ next_buffer = &rx_ring->rx_buffer_info[i];
+
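+ /* not end-of-packet: carry the partial skb over to the next
+ * descriptor and keep cleaning */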
+ if (!(staterr & IXGBE_RXD_STAT_EOP)) {
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ rx_buffer_info->skb = next_buffer->skb;
+ rx_buffer_info->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ } else {
+ skb->next = next_buffer->skb;
+ skb->next->prev = skb;
+ }
+ adapter->non_eop_descs++;
+ goto next_desc;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
+ if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
+ ixgbevf_rx_checksum(adapter, staterr, skb);
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ /*
+ * Work around issue of some types of VM to VM loop back
+ * packets not getting split correctly
+ */
+ if (staterr & IXGBE_RXD_STAT_LB) {
+ u32 header_fixup_len = skb->len - skb->data_len;
+ if (header_fixup_len < 14)
+ skb_push(skb, header_fixup_len);
+ }
+ skb->protocol = eth_type_trans(skb, adapter->netdev);
+
+ ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
+ adapter->netdev->last_rx = jiffies;
+
+next_desc:
+ rx_desc->wb.upper.status_error = 0;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
+ ixgbevf_alloc_rx_buffers(adapter, rx_ring,
+ cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+
+ rx_ring->next_to_clean = i;
+ cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+
+ if (cleaned_count)
+ ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+
+ rx_ring->total_packets += total_rx_packets;
+ rx_ring->total_bytes += total_rx_bytes;
+ adapter->net_stats.rx_bytes += total_rx_bytes;
+ adapter->net_stats.rx_packets += total_rx_packets;
+
+ return cleaned;
+}
+
+/**
+ * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our device's info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function is optimized for cleaning one queue only on a single
+ * q_vector!!!
+ **/
+static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
+{
+ struct ixgbevf_q_vector *q_vector =
+ container_of(napi, struct ixgbevf_q_vector, napi);
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *rx_ring = NULL;
+ int work_done = 0;
+ long r_idx;
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = &(adapter->rx_ring[r_idx]);
+
+ ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+
+ /* If all Rx work done, exit the polling mode */
+ if (work_done < budget) {
+ napi_complete(napi);
+ if (adapter->itr_setting & 1)
+ ixgbevf_set_itr_msix(q_vector);
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
+ }
+
+ return work_done;
+}
+
+/**
+ * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our device's info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean more than one rx queue associated with a
+ * q_vector.
+ **/
+static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
+{
+ struct ixgbevf_q_vector *q_vector =
+ container_of(napi, struct ixgbevf_q_vector, napi);
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *rx_ring = NULL;
+ int work_done = 0, i;
+ long r_idx;
+ u64 enable_mask = 0;
+
+ /* attempt to distribute budget to each queue fairly, but don't allow
+ * the budget to go below 1 because we'll exit polling */
+ budget /= (q_vector->rxr_count ?: 1);
+ budget = max(budget, 1);
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+ enable_mask |= rx_ring->v_idx;
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+#ifndef HAVE_NETDEV_NAPI_LIST
+ if (!netif_running(adapter->netdev))
+ work_done = 0;
+
+#endif
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = &(adapter->rx_ring[r_idx]);
+
+ /* If all Rx work done, exit the polling mode */
+ if (work_done < budget) {
+ napi_complete(napi);
+ if (adapter->itr_setting & 1)
+ ixgbevf_set_itr_msix(q_vector);
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable_queues(adapter, enable_mask);
+ }
+
+ return work_done;
+}
+
+
+/**
+ * ixgbevf_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
+ * interrupts.
+ **/
+static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbevf_q_vector *q_vector;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, j, q_vectors, v_idx, r_idx;
+ u32 mask;
+
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ /*
+ * Populate the IVAR table and set the ITR values to the
+ * corresponding register.
+ */
+ for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ q_vector = adapter->q_vector[v_idx];
+ /* XXX for_each_bit(...) */
+ r_idx = find_first_bit(q_vector->rxr_idx,
+ adapter->num_rx_queues);
+
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ j = adapter->rx_ring[r_idx].reg_idx;
+ ixgbevf_set_ivar(adapter, 0, j, v_idx);
+ r_idx = find_next_bit(q_vector->rxr_idx,
+ adapter->num_rx_queues,
+ r_idx + 1);
+ }
+ r_idx = find_first_bit(q_vector->txr_idx,
+ adapter->num_tx_queues);
+
+ for (i = 0; i < q_vector->txr_count; i++) {
+ j = adapter->tx_ring[r_idx].reg_idx;
+ ixgbevf_set_ivar(adapter, 1, j, v_idx);
+ r_idx = find_next_bit(q_vector->txr_idx,
+ adapter->num_tx_queues,
+ r_idx + 1);
+ }
+
+ /* if this is a tx only vector halve the interrupt rate */
+ if (q_vector->txr_count && !q_vector->rxr_count)
+ q_vector->eitr = (adapter->eitr_param >> 1);
+ else if (q_vector->rxr_count)
+ /* rx only */
+ q_vector->eitr = adapter->eitr_param;
+
+ ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
+ }
+
+ ixgbevf_set_ivar(adapter, -1, 1, v_idx);
+
+ /* set up to autoclear timer, and the vectors */
+ mask = IXGBE_EIMS_ENABLE_MASK;
+ mask &= ~IXGBE_EIMS_OTHER;
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
+}
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+/**
+ * ixgbevf_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @eitr: eitr setting (ints per sec) to give last timeslice
+ * @itr_setting: current latency class (lowest/low/bulk) for this queue
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ **/
+static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
+ u32 eitr, u8 itr_setting,
+ int packets, int bytes)
+{
+ unsigned int retval = itr_setting;
+ u32 timepassed_us;
+ u64 bytes_perint;
+
+ if (packets == 0)
+ goto update_itr_done;
+
+
+ /* simple throttlerate management
+ * 0-20MB/s lowest (100000 ints/s)
+ * 20-100MB/s low (20000 ints/s)
+ * 100-1249MB/s bulk (8000 ints/s)
+ */
+ /* what was last interrupt timeslice? */
+ timepassed_us = 1000000/eitr;
+ bytes_perint = bytes / timepassed_us; /* bytes/usec */
+
+ switch (itr_setting) {
+ case lowest_latency:
+ if (bytes_perint > adapter->eitr_low)
+ retval = low_latency;
+ break;
+ case low_latency:
+ if (bytes_perint > adapter->eitr_high)
+ retval = bulk_latency;
+ else if (bytes_perint <= adapter->eitr_low)
+ retval = lowest_latency;
+ break;
+ case bulk_latency:
+ if (bytes_perint <= adapter->eitr_high)
+ retval = low_latency;
+ break;
+ }
+
+update_itr_done:
+ return retval;
+}
+
+/**
+ * ixgbevf_write_eitr - write VTEITR register in hardware specific way
+ * @adapter: pointer to adapter struct
+ * @v_idx: vector index into q_vector array
+ * @itr_reg: new value to be written in *register* format, not ints/s
+ *
+ * This function is made to be called by ethtool and by the driver
+ * when it needs to update VTEITR registers at runtime. Hardware
+ * specific quirks/differences are taken care of here.
+ */
+static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
+ u32 itr_reg)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);
+
+ /*
+ * set the WDIS bit to not clear the timer bits and cause an
+ * immediate assertion of the interrupt
+ */
+ itr_reg |= IXGBE_EITR_CNT_WDIS;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
+}
+
+static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
+{
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ u32 new_itr;
+ u8 current_itr, ret_itr;
+ int i, r_idx, v_idx = q_vector->v_idx;
+ struct ixgbevf_ring *rx_ring, *tx_ring;
+
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+ tx_ring = &(adapter->tx_ring[r_idx]);
+ ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
+ q_vector->tx_itr,
+ tx_ring->total_packets,
+ tx_ring->total_bytes);
+ /* if the result for this queue would decrease interrupt
+ * rate for this vector then use that result */
+ q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
+ q_vector->tx_itr - 1 : ret_itr);
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx + 1);
+ }
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
+ q_vector->rx_itr,
+ rx_ring->total_packets,
+ rx_ring->total_bytes);
+ /* if the result for this queue would decrease interrupt
+ * rate for this vector then use that result */
+ q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
+ q_vector->rx_itr - 1 : ret_itr);
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+ current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = 100000;
+ break;
+ case low_latency:
+ new_itr = 20000; /* aka hwitr = ~200 */
+ break;
+ case bulk_latency:
+ default:
+ new_itr = 8000;
+ break;
+ }
+
+ if (new_itr != q_vector->eitr) {
+ u32 itr_reg;
+
+ /* save the algorithm value here, not the smoothed one */
+ q_vector->eitr = new_itr;
+ /* do an exponential smoothing */
+ new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+ itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
+ ixgbevf_write_eitr(adapter, v_idx, itr_reg);
+ }
+}
+
+static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 eicr;
+ u32 msg;
+
+ eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
+
+ hw->mbx.ops.read(hw, &msg, 1);
+
+ if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 10));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
+{
+ struct ixgbevf_q_vector *q_vector = data;
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *tx_ring;
+ int i, r_idx;
+
+ if (!q_vector->txr_count)
+ return IRQ_HANDLED;
+
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+ tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring->total_bytes = 0;
+ tx_ring->total_packets = 0;
+ ixgbevf_clean_tx_irq(adapter, tx_ring);
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx + 1);
+ }
+
+ if (adapter->itr_setting & 1)
+ ixgbevf_set_itr_msix(q_vector);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
+ * @irq: unused
+ * @data: pointer to our q_vector struct for this interrupt vector
+ **/
+static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
+{
+ struct ixgbevf_q_vector *q_vector = data;
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbevf_ring *rx_ring;
+ int r_idx;
+ int i;
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring->total_bytes = 0;
+ rx_ring->total_packets = 0;
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+ if (!q_vector->rxr_count)
+ return IRQ_HANDLED;
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ /* disable interrupts on this vector only */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
+ napi_schedule(&q_vector->napi);
+
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
+{
+ ixgbevf_msix_clean_rx(irq, data);
+ ixgbevf_msix_clean_tx(irq, data);
+
+ return IRQ_HANDLED;
+}
+
+static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
+ int r_idx)
+{
+ struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
+
+ set_bit(r_idx, q_vector->rxr_idx);
+ q_vector->rxr_count++;
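+ /* remember this vector's interrupt bit so the ring can later
+ * mask, unmask or retrigger its own MSI-X vector */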
+ a->rx_ring[r_idx].v_idx = 1 << v_idx;
+}
+
+static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
+ int t_idx)
+{
+ struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
+
+ set_bit(t_idx, q_vector->txr_idx);
+ q_vector->txr_count++;
+ a->tx_ring[t_idx].v_idx = 1 << v_idx;
+}
+
+/**
+ * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code. Ideally, we'd have
+ * one vector per ring/queue, but on a constrained vector budget, we
+ * group the rings as "efficiently" as possible. You would add new
+ * mapping configurations in here.
+ **/
+static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
+{
+ int q_vectors;
+ int v_start = 0;
+ int rxr_idx = 0, txr_idx = 0;
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int i, j;
+ int rqpv, tqpv;
+ int err = 0;
+
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ /*
+ * The ideal configuration...
+ * We have enough vectors to map one per queue.
+ */
+ if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
+ for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+ map_vector_to_rxq(adapter, v_start, rxr_idx);
+
+ for (; txr_idx < txr_remaining; v_start++, txr_idx++)
+ map_vector_to_txq(adapter, v_start, txr_idx);
+ goto out;
+ }
+
+ /*
+ * If we don't have enough vectors for a 1-to-1
+ * mapping, we'll have to group them so there are
+ * multiple queues per vector.
+ */
+ /* Re-adjusting *qpv takes care of the remainder. */
+ for (i = v_start; i < q_vectors; i++) {
+ rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
+ for (j = 0; j < rqpv; j++) {
+ map_vector_to_rxq(adapter, i, rxr_idx);
+ rxr_idx++;
+ rxr_remaining--;
+ }
+ }
+ for (i = v_start; i < q_vectors; i++) {
+ tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
+ for (j = 0; j < tqpv; j++) {
+ map_vector_to_txq(adapter, i, txr_idx);
+ txr_idx++;
+ txr_remaining--;
+ }
+ }
+
+out:
+ return err;
+}
+
+/**
+ * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ irqreturn_t (*handler)(int, void *);
+ int i, vector, q_vectors, err;
+ int ri = 0, ti = 0;
+
+ /* Decrement for the mailbox (other) vector */
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
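+/* pick the handler that matches the ring types (Rx, Tx or both) owned by a vector */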
+#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
+ ? &ixgbevf_msix_clean_many : \
+ (_v)->rxr_count ? &ixgbevf_msix_clean_rx : \
+ (_v)->txr_count ? &ixgbevf_msix_clean_tx : \
+ NULL)
+ for (vector = 0; vector < q_vectors; vector++) {
+ handler = SET_HANDLER(adapter->q_vector[vector]);
+
+ if (handler == &ixgbevf_msix_clean_rx) {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "rx", ri++);
+ } else if (handler == &ixgbevf_msix_clean_tx) {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "tx", ti++);
+ } else if (handler == &ixgbevf_msix_clean_many) {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "TxRx", vector);
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
+ err = request_irq(adapter->msix_entries[vector].vector,
+ handler, 0, adapter->name[vector],
+ adapter->q_vector[vector]);
+ if (err) {
+ hw_dbg(&adapter->hw,
+ "request_irq failed for MSIX interrupt "
+ "Error: %d\n", err);
+ goto free_queue_irqs;
+ }
+ }
+
+ sprintf(adapter->name[vector], "%s:mbx", netdev->name);
+ err = request_irq(adapter->msix_entries[vector].vector,
+ &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
+ if (err) {
+ hw_dbg(&adapter->hw,
+ "request_irq for msix_mbx failed: %d\n", err);
+ goto free_queue_irqs;
+ }
+
+ return 0;
+
+free_queue_irqs:
+ for (i = vector - 1; i >= 0; i--)
+ free_irq(adapter->msix_entries[--vector].vector,
+ &(adapter->q_vector[i]));
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ return err;
+}
+
+static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
+{
+ int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (i = 0; i < q_vectors; i++) {
+ struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
+ bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
+ bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
+ q_vector->rxr_count = 0;
+ q_vector->txr_count = 0;
+ q_vector->eitr = adapter->eitr_param;
+ }
+}
+
+/**
+ * ixgbevf_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
+{
+ int err = 0;
+
+ err = ixgbevf_request_msix_irqs(adapter);
+
+ if (err)
+ hw_dbg(&adapter->hw,
+ "request_irq failed, Error %d\n", err);
+
+ return err;
+}
+
+static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i, q_vectors;
+
+ q_vectors = adapter->num_msix_vectors;
+
+ i = q_vectors - 1;
+
+ free_irq(adapter->msix_entries[i].vector, netdev);
+ i--;
+
+ for (; i >= 0; i--) {
+ free_irq(adapter->msix_entries[i].vector,
+ adapter->q_vector[i]);
+ }
+
+ ixgbevf_reset_q_vectors(adapter);
+}
+
+/**
+ * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
+{
+ int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ for (i = 0; i < adapter->num_msix_vectors; i++)
+ synchronize_irq(adapter->msix_entries[i].vector);
+}
+
+/**
+ * ixgbevf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
+ bool queues, bool flush)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 mask;
+ u64 qmask;
+
+ mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+ qmask = ~0;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+ if (queues)
+ ixgbevf_irq_enable_queues(adapter, qmask);
+
+ if (flush)
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
+{
+ u64 tdba;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 i, j, tdlen, txctrl;
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbevf_ring *ring = &adapter->tx_ring[i];
+ j = ring->reg_idx;
+ tdba = ring->dma;
+ tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
+ (tdba & DMA_BIT_MASK(32)));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
+ adapter->tx_ring[i].head = IXGBE_VFTDH(j);
+ adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
+ /* Disable Tx Head Writeback RO bit, since this hoses
+ * bookkeeping if things aren't delivered in order.
+ */
+ txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
+ txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
+ }
+}
+
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
+
+static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
+{
+ struct ixgbevf_ring *rx_ring;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 srrctl;
+
+ rx_ring = &adapter->rx_ring[index];
+
+ srrctl = IXGBE_SRRCTL_DROP_EN;
+
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ u16 bufsz = IXGBEVF_RXBUFFER_2048;
+ /* grow the amount we can receive on large page machines */
+ if (bufsz < (PAGE_SIZE / 2))
+ bufsz = (PAGE_SIZE / 2);
+ /* cap the bufsz at our largest descriptor size */
+ bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);
+
+ srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
+ IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK);
+ } else {
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+ if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
+ srrctl |= IXGBEVF_RXBUFFER_2048 >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= rx_ring->rx_buf_len >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
+}
+
+/**
+ * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
+{
+ u64 rdba;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ int i, j;
+ u32 rdlen;
+ int rx_buf_len;
+
+ /* Decide whether to use packet split mode or not */
+ if (netdev->mtu > ETH_DATA_LEN) {
+ if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+ } else {
+ if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ }
+
+ /* Set the RX buffer length according to the mode */
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ /* PSRTYPE must be initialized in 82599 */
+ u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_IPV6HDR |
+ IXGBE_PSRTYPE_L2HDR;
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+ rx_buf_len = IXGBEVF_RX_HDR_SIZE;
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+ if (netdev->mtu <= ETH_DATA_LEN)
+ rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ else
+ rx_buf_len = ALIGN(max_frame, 1024);
+ }
+
+ rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rdba = adapter->rx_ring[i].dma;
+ j = adapter->rx_ring[i].reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
+ (rdba & DMA_BIT_MASK(32)));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
+ adapter->rx_ring[i].head = IXGBE_VFRDH(j);
+ adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
+ adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+
+ ixgbevf_configure_srrctl(adapter, j);
+ }
+}
+
+static void ixgbevf_vlan_rx_register(struct net_device *netdev,
+ struct vlan_group *grp)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, j;
+ u32 ctrl;
+
+ adapter->vlgrp = grp;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ j = adapter->rx_ring[i].reg_idx;
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+ ctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), ctrl);
+ }
+}
+
+static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *v_netdev;
+
+ /* add VID to filter table */
+ if (hw->mac.ops.set_vfta)
+ hw->mac.ops.set_vfta(hw, vid, 0, true);
+ /*
+ * Copy feature flags from netdev to the vlan netdev for this vid.
+ * This allows things like TSO to bubble down to our vlan device.
+ */
+ v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
+ v_netdev->features |= adapter->netdev->features;
+ vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
+}
+
+static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_disable(adapter);
+
+ vlan_group_set_device(adapter->vlgrp, vid, NULL);
+
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable(adapter, true, true);
+
+ /* remove VID from filter table */
+ if (hw->mac.ops.set_vfta)
+ hw->mac.ops.set_vfta(hw, vid, 0, false);
+}
+
+static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
+{
+ ixgbevf_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+
+ if (adapter->vlgrp) {
+ u16 vid;
+ for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+ if (!vlan_group_get_device(adapter->vlgrp, vid))
+ continue;
+ ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
+ }
+ }
+}
+
+static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+ u32 *vmdq)
+{
+ struct dev_mc_list *mc_ptr;
+ u8 *addr = *mc_addr_ptr;
+ *vmdq = 0;
+
+ mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
+ if (mc_ptr->next)
+ *mc_addr_ptr = mc_ptr->next->dmi_addr;
+ else
+ *mc_addr_ptr = NULL;
+
+ return addr;
+}
+
+/**
+ * ixgbevf_set_rx_mode - Multicast set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast mode.
+ **/
+static void ixgbevf_set_rx_mode(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 *addr_list = NULL;
+ int addr_count = 0;
+
+ /* reprogram multicast list */
+ addr_count = netdev->mc_count;
+ if (addr_count)
+ addr_list = netdev->mc_list->dmi_addr;
+ if (hw->mac.ops.update_mc_addr_list)
+ hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
+ ixgbevf_addr_list_itr);
+}
+
+static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
+{
+ int q_idx;
+ struct ixgbevf_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ struct napi_struct *napi;
+ q_vector = adapter->q_vector[q_idx];
+ if (!q_vector->rxr_count)
+ continue;
+ napi = &q_vector->napi;
+ if (q_vector->rxr_count > 1)
+ napi->poll = &ixgbevf_clean_rxonly_many;
+
+ napi_enable(napi);
+ }
+}
+
+static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
+{
+ int q_idx;
+ struct ixgbevf_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ q_vector = adapter->q_vector[q_idx];
+ if (!q_vector->rxr_count)
+ continue;
+ napi_disable(&q_vector->napi);
+ }
+}
+
+static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ ixgbevf_set_rx_mode(netdev);
+
+ ixgbevf_restore_vlan(adapter);
+
+ ixgbevf_configure_tx(adapter);
+ ixgbevf_configure_rx(adapter);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbevf_ring *ring = &adapter->rx_ring[i];
+ ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
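+ /* point the hardware tail at the last initialized descriptor */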
+ ring->next_to_use = ring->count - 1;
+ writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
+ }
+}
+
+#define IXGBE_MAX_RX_DESC_POLL 10
+static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+ int rxr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int j = adapter->rx_ring[rxr].reg_idx;
+ int k;
+
+ for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
+ if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
+ break;
+ else
+ msleep(1);
+ }
+ if (k >= IXGBE_MAX_RX_DESC_POLL) {
+ hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
+ "not set within the polling period\n", rxr);
+ }
+
+ ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
+ (adapter->rx_ring[rxr].count - 1));
+}
+
+static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, j = 0;
+ int num_rx_rings = adapter->num_rx_queues;
+ u32 txdctl, rxdctl;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ j = adapter->tx_ring[i].reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+ /* enable WTHRESH=8 descriptors, to encourage burst writeback */
+ txdctl |= (8 << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ j = adapter->tx_ring[i].reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
+ }
+
+ for (i = 0; i < num_rx_rings; i++) {
+ j = adapter->rx_ring[i].reg_idx;
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+ rxdctl |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
+ ixgbevf_rx_desc_queue_enable(adapter, i);
+ }
+
+ ixgbevf_configure_msix(adapter);
+
+ if (hw->mac.ops.set_rar) {
+ if (is_valid_ether_addr(hw->mac.addr))
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+ else
+ hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
+ }
+
+ clear_bit(__IXGBEVF_DOWN, &adapter->state);
+ ixgbevf_napi_enable_all(adapter);
+
+ /* enable transmits */
+ netif_tx_start_all_queues(netdev);
+
+ /* bring the link up in the watchdog; this could race with our first
+ * link up interrupt but shouldn't be a problem */
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ mod_timer(&adapter->watchdog_timer, jiffies);
+ return 0;
+}
+
+int ixgbevf_up(struct ixgbevf_adapter *adapter)
+{
+ int err;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ ixgbevf_configure(adapter);
+
+ err = ixgbevf_up_complete(adapter);
+
+ /* clear any pending interrupts, may auto mask */
+ IXGBE_READ_REG(hw, IXGBE_VTEICR);
+
+ ixgbevf_irq_enable(adapter, true, true);
+
+ return err;
+}
+
+/**
+ * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
+ **/
+static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+ unsigned int i;
+
+ if (!rx_ring->rx_buffer_info)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ struct ixgbevf_rx_buffer *rx_buffer_info;
+
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ if (rx_buffer_info->dma) {
+ pci_unmap_single(pdev, rx_buffer_info->dma,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
+ rx_buffer_info->dma = 0;
+ }
+ if (rx_buffer_info->skb) {
+ struct sk_buff *skb = rx_buffer_info->skb;
+ rx_buffer_info->skb = NULL;
+ do {
+ struct sk_buff *this = skb;
+ skb = skb->prev;
+ dev_kfree_skb(this);
+ } while (skb);
+ }
+ if (!rx_buffer_info->page)
+ continue;
+ pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
+ PCI_DMA_FROMDEVICE);
+ rx_buffer_info->page_dma = 0;
+ put_page(rx_buffer_info->page);
+ rx_buffer_info->page = NULL;
+ rx_buffer_info->page_offset = 0;
+ }
+
+ size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ if (rx_ring->head)
+ writel(0, adapter->hw.hw_addr + rx_ring->head);
+ if (rx_ring->tail)
+ writel(0, adapter->hw.hw_addr + rx_ring->tail);
+}
+
+/**
+ * ixgbevf_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ if (!tx_ring->tx_buffer_info)
+ return;
+
+ /* Free all the Tx ring sk_buffs */
+
+ for (i = 0; i < tx_ring->count; i++) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ }
+
+ size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_buffer_info, 0, size);
+
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ if (tx_ring->head)
+ writel(0, adapter->hw.hw_addr + tx_ring->head);
+ if (tx_ring->tail)
+ writel(0, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+/**
+ * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+void ixgbevf_down(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 txdctl;
+ int i, j;
+
+ /* signal that we are down to the interrupt handler */
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+ /* disable receives */
+
+ netif_tx_disable(netdev);
+
+ msleep(10);
+
+ netif_tx_stop_all_queues(netdev);
+
+ ixgbevf_irq_disable(adapter);
+
+ ixgbevf_napi_disable_all(adapter);
+
+ del_timer_sync(&adapter->watchdog_timer);
+ /* can't call flush scheduled work here because it can deadlock
+ * if linkwatch_event tries to acquire the rtnl_lock which we are
+ * holding */
+ while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
+ msleep(1);
+
+ /* disable transmits in the hardware now that interrupts are off */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ j = adapter->tx_ring[i].reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
+ (txdctl & ~IXGBE_TXDCTL_ENABLE));
+ }
+
+ netif_carrier_off(netdev);
+
+ if (!pci_channel_offline(adapter->pdev))
+ ixgbevf_reset(adapter);
+
+ ixgbevf_clean_all_tx_rings(adapter);
+ ixgbevf_clean_all_rx_rings(adapter);
+}
+
+void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ WARN_ON(in_interrupt());
+
+ while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
+ msleep(1);
+
+ /*
+ * Check if PF is up before re-init. If not then skip until
+ * later when the PF is up and ready to service requests from
+ * the VF via mailbox. If the VF is up and running then the
+ * watchdog task will continue to schedule reset tasks until
+ * the PF is up and running.
+ */
+ if (!hw->mac.ops.reset_hw(hw)) {
+ ixgbevf_down(adapter);
+ ixgbevf_up(adapter);
+ }
+
+ clear_bit(__IXGBEVF_RESETTING, &adapter->state);
+}
+
+void ixgbevf_reset(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+
+ if (hw->mac.ops.reset_hw(hw))
+ hw_dbg(hw, "PF still resetting\n");
+ else
+ hw->mac.ops.init_hw(hw);
+
+ if (is_valid_ether_addr(adapter->hw.mac.addr)) {
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
+ }
+}
+
+static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
+ int vectors)
+{
+ int err, vector_threshold;
+
+ /* We'll want at least 3 (vector_threshold):
+ * 1) TxQ[0] Cleanup
+ * 2) RxQ[0] Cleanup
+ * 3) Other (Link Status Change, etc.)
+ */
+ vector_threshold = MIN_MSIX_COUNT;
+
+ /* The more we get, the more we will assign to Tx/Rx Cleanup
+ * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+ * Right now, we simply care about how many we'll get; we'll
+ * set them up later while requesting irq's.
+ */
+ while (vectors >= vector_threshold) {
+ err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+ vectors);
+ if (!err) /* Success in acquiring all requested vectors. */
+ break;
+ else if (err < 0)
+ vectors = 0; /* Nasty failure, quit now */
+ else /* err == number of vectors we should try again with */
+ vectors = err;
+ }
+
+ if (vectors < vector_threshold) {
+ /* Can't allocate enough MSI-X interrupts? Oh well.
+ * This just means we'll go with either a single MSI
+ * vector or fall back to legacy interrupts.
+ */
+ hw_dbg(&adapter->hw,
+ "Unable to allocate MSI-X interrupts\n");
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else {
+ /*
+ * Adjust for only the vectors we'll use, which is minimum
+ * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = vectors;
+ }
+}
+
+/**
+ * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
+ * @adapter: board private structure to initialize
+ *
+ * This is the top level queue allocation routine. The order here is very
+ * important, starting with the "most" number of features turned on at once,
+ * and ending with the smallest set of features. This way large combinations
+ * can be allocated if they're turned on, and smaller combinations are the
+ * fallthrough conditions.
+ *
+ **/
+static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
+{
+ /* Start with base case */
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_queues_per_pool = 1;
+}
+
+/**
+ * ixgbevf_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time.
+ **/
+static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ adapter->tx_ring = kcalloc(adapter->num_tx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!adapter->tx_ring)
+ goto err_tx_ring_allocation;
+
+ adapter->rx_ring = kcalloc(adapter->num_rx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!adapter->rx_ring)
+ goto err_rx_ring_allocation;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ adapter->tx_ring[i].count = adapter->tx_ring_count;
+ adapter->tx_ring[i].queue_index = i;
+ adapter->tx_ring[i].reg_idx = i;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ adapter->rx_ring[i].count = adapter->rx_ring_count;
+ adapter->rx_ring[i].queue_index = i;
+ adapter->rx_ring[i].reg_idx = i;
+ }
+
+ return 0;
+
+err_rx_ring_allocation:
+ kfree(adapter->tx_ring);
+err_tx_ring_allocation:
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
+{
+ int err = 0;
+ int vector, v_budget;
+
+ /*
+ * It's easy to be greedy for MSI-X vectors, but it really
+ * doesn't do us much good if we have a lot more vectors
+ * than CPUs. So let's be conservative and only ask for
+ * (roughly) twice the number of vectors as there are CPUs.
+ */
+ v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
+ (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+
+ /* A failure in MSI-X entry allocation isn't fatal, but it does
+ * mean we disable MSI-X capabilities of the adapter. */
+ adapter->msix_entries = kcalloc(v_budget,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!adapter->msix_entries) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (vector = 0; vector < v_budget; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+ ixgbevf_acquire_msix_vectors(adapter, v_budget);
+
+out:
+ return err;
+}
+
+/**
+ * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ struct ixgbevf_q_vector *q_vector;
+ int napi_vectors;
+ int (*poll)(struct napi_struct *, int);
+
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ napi_vectors = adapter->num_rx_queues;
+ poll = &ixgbevf_clean_rxonly;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
+ if (!q_vector)
+ goto err_out;
+ q_vector->adapter = adapter;
+ q_vector->v_idx = q_idx;
+ q_vector->eitr = adapter->eitr_param;
+ if (q_idx < napi_vectors)
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ (*poll), 64);
+ adapter->q_vector[q_idx] = q_vector;
+ }
+
+ return 0;
+
+err_out:
+ while (q_idx) {
+ q_idx--;
+ q_vector = adapter->q_vector[q_idx];
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ adapter->q_vector[q_idx] = NULL;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ int napi_vectors;
+
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ napi_vectors = adapter->num_rx_queues;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
+
+ adapter->q_vector[q_idx] = NULL;
+ if (q_idx < napi_vectors)
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ }
+}
+
+/**
+ * ixgbevf_reset_interrupt_capability - Reset MSIX setup
+ * @adapter: board private structure
+ *
+ **/
+static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
+{
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+}
+
+/**
+ * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
+ * @adapter: board private structure to initialize
+ *
+ **/
+static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
+{
+ int err;
+
+ /* Number of supported queues */
+ ixgbevf_set_num_queues(adapter);
+
+ err = ixgbevf_set_interrupt_capability(adapter);
+ if (err) {
+ hw_dbg(&adapter->hw,
+ "Unable to setup interrupt capabilities\n");
+ goto err_set_interrupt;
+ }
+
+ err = ixgbevf_alloc_q_vectors(adapter);
+ if (err) {
+ hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
+ "vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ err = ixgbevf_alloc_queues(adapter);
+ if (err) {
+ printk(KERN_ERR "Unable to allocate memory for queues\n");
+ goto err_alloc_queues;
+ }
+
+ hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
+ "Tx Queue count = %u\n",
+ (adapter->num_rx_queues > 1) ? "Enabled" :
+ "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
+
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+ return 0;
+err_alloc_queues:
+ ixgbevf_free_q_vectors(adapter);
+err_alloc_q_vectors:
+ ixgbevf_reset_interrupt_capability(adapter);
+err_set_interrupt:
+ return err;
+}
+
+/**
+ * ixgbevf_sw_init - Initialize general software structures
+ * (struct ixgbevf_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * ixgbevf_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+
+ /* PCI config space info */
+
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+
+ hw->mbx.ops.init_params(hw);
+ hw->mac.max_tx_queues = MAX_TX_QUEUES;
+ hw->mac.max_rx_queues = MAX_RX_QUEUES;
+ err = hw->mac.ops.reset_hw(hw);
+ if (err) {
+ dev_info(&pdev->dev,
+ "PF still in reset state, assigning new address\n");
+ random_ether_addr(hw->mac.addr);
+ } else {
+ err = hw->mac.ops.init_hw(hw);
+ if (err) {
+			printk(KERN_ERR "init_hw failed: %d\n", err);
+ goto out;
+ }
+ }
+
+ /* Enable dynamic interrupt throttling rates */
+ adapter->eitr_param = 20000;
+ adapter->itr_setting = 1;
+
+ /* set defaults for eitr in MegaBytes */
+ adapter->eitr_low = 10;
+ adapter->eitr_high = 20;
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
+ adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
+
+ /* enable rx csum by default */
+ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+out:
+ return err;
+}
+
+static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
+ adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
+ adapter->stats.last_vfgorc |=
+ (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
+ adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
+ adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
+ adapter->stats.last_vfgotc |=
+ (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
+ adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
+
+ adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
+ adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
+ adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
+ adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
+ adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
+}
+
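+/*
+ * The VF statistics registers are free-running hardware counters that are
+ * only 32 (or 36) bits wide.  These helpers detect a counter wrap between
+ * reads and fold the new reading into the driver's 64-bit accumulators.
+ */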
+#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
+ { \
+ u32 current_counter = IXGBE_READ_REG(hw, reg); \
+ if (current_counter < last_counter) \
+ counter += 0x100000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFFF00000000LL; \
+ counter |= current_counter; \
+ }
+
+#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+ { \
+ u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
+ u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
+ u64 current_counter = (current_counter_msb << 32) | \
+ current_counter_lsb; \
+ if (current_counter < last_counter) \
+ counter += 0x1000000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFF000000000LL; \
+ counter |= current_counter; \
+ }
+/**
+ * ixgbevf_update_stats - Update the board statistics counters.
+ * @adapter: board private structure
+ **/
+void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
+ adapter->stats.vfgprc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
+ adapter->stats.vfgptc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
+ adapter->stats.last_vfgorc,
+ adapter->stats.vfgorc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
+ adapter->stats.last_vfgotc,
+ adapter->stats.vfgotc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
+ adapter->stats.vfmprc);
+
+ /* Fill out the OS statistics structure */
+ adapter->net_stats.multicast = adapter->stats.vfmprc -
+ adapter->stats.base_vfmprc;
+}
+
+/**
+ * ixgbevf_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void ixgbevf_watchdog(unsigned long data)
+{
+ struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 eics = 0;
+ int i;
+
+ /*
+ * Do the watchdog outside of interrupt context due to the lovely
+ * delays that some of the newer hardware requires
+ */
+
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+ goto watchdog_short_circuit;
+
+ /* get one bit for every active tx/rx interrupt vector */
+ for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+ struct ixgbevf_q_vector *qv = adapter->q_vector[i];
+ if (qv->rxr_count || qv->txr_count)
+ eics |= (1 << i);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
+
+watchdog_short_circuit:
+ schedule_work(&adapter->watchdog_task);
+}
+
+/**
+ * ixgbevf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void ixgbevf_tx_timeout(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+}
+
+static void ixgbevf_reset_task(struct work_struct *work)
+{
+ struct ixgbevf_adapter *adapter;
+ adapter = container_of(work, struct ixgbevf_adapter, reset_task);
+
+ /* If we're already down or resetting, just bail */
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+ test_bit(__IXGBEVF_RESETTING, &adapter->state))
+ return;
+
+ adapter->tx_timeout_count++;
+
+ ixgbevf_reinit_locked(adapter);
+}
+
+/**
+ * ixgbevf_watchdog_task - worker thread to bring link up
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbevf_watchdog_task(struct work_struct *work)
+{
+ struct ixgbevf_adapter *adapter = container_of(work,
+ struct ixgbevf_adapter,
+ watchdog_task);
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = adapter->link_speed;
+ bool link_up = adapter->link_up;
+
+ adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
+
+ /*
+ * Always check the link on the watchdog because we have
+ * no LSC interrupt
+ */
+ if (hw->mac.ops.check_link) {
+ if ((hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false)) != 0) {
+ adapter->link_up = link_up;
+ adapter->link_speed = link_speed;
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ schedule_work(&adapter->reset_task);
+ goto pf_has_reset;
+ }
+ } else {
+		/* always assume link is up if there is no
+		 * check_link function */
+ link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ link_up = true;
+ }
+ adapter->link_up = link_up;
+ adapter->link_speed = link_speed;
+
+ if (link_up) {
+ if (!netif_carrier_ok(netdev)) {
+ hw_dbg(&adapter->hw, "NIC Link is Up %s, ",
+ ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ "10 Gbps" : "1 Gbps"));
+ netif_carrier_on(netdev);
+ netif_tx_wake_all_queues(netdev);
+ } else {
+ /* Force detection of hung controller */
+ adapter->detect_tx_hung = true;
+ }
+ } else {
+ adapter->link_up = false;
+ adapter->link_speed = 0;
+ if (netif_carrier_ok(netdev)) {
+ hw_dbg(&adapter->hw, "NIC Link is Down\n");
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ }
+ }
+
+pf_has_reset:
+ ixgbevf_update_stats(adapter);
+
+ /* Force detection of hung controller every watchdog period */
+ adapter->detect_tx_hung = true;
+
+ /* Reset the timer */
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + (2 * HZ)));
+
+ adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
+}
+
+/**
+ * ixgbevf_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ ixgbevf_clean_tx_ring(adapter, tx_ring);
+
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ if (adapter->tx_ring[i].desc)
+ ixgbevf_free_tx_resources(adapter,
+ &adapter->tx_ring[i]);
+
+}
+
+/**
+ * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
+ tx_ring->tx_buffer_info = vmalloc(size);
+ if (!tx_ring->tx_buffer_info)
+ goto err;
+ memset(tx_ring->tx_buffer_info, 0, size);
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+ tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+ &tx_ring->dma);
+ if (!tx_ring->desc)
+ goto err;
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ tx_ring->work_limit = tx_ring->count;
+ return 0;
+
+err:
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+ hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
+ "descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings are populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ if (!err)
+ continue;
+ hw_dbg(&adapter->hw,
+ "Allocation for Tx Queue %u failed\n", i);
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
+ rx_ring->rx_buffer_info = vmalloc(size);
+ if (!rx_ring->rx_buffer_info) {
+ hw_dbg(&adapter->hw,
+ "Unable to vmalloc buffer memory for "
+ "the receive descriptor ring\n");
+ goto alloc_failed;
+ }
+ memset(rx_ring->rx_buffer_info, 0, size);
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
+ &rx_ring->dma);
+
+ if (!rx_ring->desc) {
+ hw_dbg(&adapter->hw,
+ "Unable to allocate memory for "
+ "the receive descriptor ring\n");
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ goto alloc_failed;
+ }
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ return 0;
+alloc_failed:
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings are populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ if (!err)
+ continue;
+ hw_dbg(&adapter->hw,
+ "Allocation for Rx Queue %u failed\n", i);
+ break;
+ }
+ return err;
+}
+
+/**
+ * ixgbevf_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ ixgbevf_clean_rx_ring(adapter, rx_ring);
+
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+
+ rx_ring->desc = NULL;
+}
+
+/**
+ * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ if (adapter->rx_ring[i].desc)
+ ixgbevf_free_rx_resources(adapter,
+ &adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbevf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int ixgbevf_open(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__IXGBEVF_TESTING, &adapter->state))
+ return -EBUSY;
+
+ if (hw->adapter_stopped) {
+ ixgbevf_reset(adapter);
+ /* if adapter is still stopped then PF isn't up and
+ * the vf can't start. */
+ if (hw->adapter_stopped) {
+ err = IXGBE_ERR_MBX;
+			printk(KERN_ERR "Unable to start - perhaps the PF "
+			       "driver isn't up yet\n");
+ goto err_setup_reset;
+ }
+ }
+
+ /* allocate transmit descriptors */
+ err = ixgbevf_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = ixgbevf_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ ixgbevf_configure(adapter);
+
+ /*
+ * Map the Tx/Rx rings to the vectors we were allotted.
+ * if request_irq will be called in this function map_rings
+ * must be called *before* up_complete
+ */
+ ixgbevf_map_rings_to_vectors(adapter);
+
+ err = ixgbevf_up_complete(adapter);
+ if (err)
+ goto err_up;
+
+ /* clear any pending interrupts, may auto mask */
+ IXGBE_READ_REG(hw, IXGBE_VTEICR);
+ err = ixgbevf_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ ixgbevf_irq_enable(adapter, true, true);
+
+ return 0;
+
+err_req_irq:
+ ixgbevf_down(adapter);
+err_up:
+ ixgbevf_free_irq(adapter);
+err_setup_rx:
+ ixgbevf_free_all_rx_resources(adapter);
+err_setup_tx:
+ ixgbevf_free_all_tx_resources(adapter);
+ ixgbevf_reset(adapter);
+
+err_setup_reset:
+
+ return err;
+}
+
+/**
+ * ixgbevf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int ixgbevf_close(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ ixgbevf_down(adapter);
+ ixgbevf_free_irq(adapter);
+
+ ixgbevf_free_all_tx_resources(adapter);
+ ixgbevf_free_all_rx_resources(adapter);
+
+ return 0;
+}
+
+static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+ struct ixgbe_adv_tx_context_desc *context_desc;
+ unsigned int i;
+ int err;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ u32 vlan_macip_lens = 0, type_tucmd_mlhl;
+ u32 mss_l4len_idx, l4len;
+
+ if (skb_is_gso(skb)) {
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+ l4len = tcp_hdrlen(skb);
+ *hdr_len += l4len;
+
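+		/* Prime the pseudo-header checksum so the hardware can
+		 * insert the final TCP checksum for each segment. */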
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ adapter->hw_tso_ctxt++;
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ adapter->hw_tso6_ctxt++;
+ }
+
+ i = tx_ring->next_to_use;
+
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+
+ /* VLAN MACLEN IPLEN */
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ vlan_macip_lens |=
+ (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
+ vlan_macip_lens |= ((skb_network_offset(skb)) <<
+ IXGBE_ADVTXD_MACLEN_SHIFT);
+ *hdr_len += skb_network_offset(skb);
+ vlan_macip_lens |=
+ (skb_transport_header(skb) - skb_network_header(skb));
+ *hdr_len +=
+ (skb_transport_header(skb) - skb_network_header(skb));
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
+ IXGBE_ADVTXD_DTYP_CTXT);
+
+ if (skb->protocol == htons(ETH_P_IP))
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+
+ /* MSS L4LEN IDX */
+ mss_l4len_idx =
+ (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
+ mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
+ /* use index 1 for TSO */
+ mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+ }
+
+ return false;
+}
+
+static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags)
+{
+ struct ixgbe_adv_tx_context_desc *context_desc;
+ unsigned int i;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL ||
+ (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
+ i = tx_ring->next_to_use;
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ vlan_macip_lens |= (tx_flags &
+ IXGBE_TX_FLAGS_VLAN_MASK);
+ vlan_macip_lens |= (skb_network_offset(skb) <<
+ IXGBE_ADVTXD_MACLEN_SHIFT);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ vlan_macip_lens |= (skb_transport_header(skb) -
+ skb_network_header(skb));
+
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+
+ type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
+ IXGBE_ADVTXD_DTYP_CTXT);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ type_tucmd_mlhl |=
+ IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ /* XXX what about other V6 headers?? */
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ type_tucmd_mlhl |=
+ IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ printk(KERN_WARNING
+ "partial checksum but "
+ "proto=%x!\n",
+ skb->protocol);
+ }
+ break;
+ }
+ }
+
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+ /* use index zero for tx checksum offload */
+ context_desc->mss_l4len_idx = 0;
+
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ adapter->hw_csum_tx_good++;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+ }
+
+ return false;
+}
+
+static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags,
+ unsigned int first)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ unsigned int len;
+ unsigned int total = skb->len;
+ unsigned int offset = 0, size, count = 0, i;
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ unsigned int f;
+
+ i = tx_ring->next_to_use;
+
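+	/* Map the linear portion of the skb first, splitting it into
+	 * chunks of at most IXGBE_MAX_DATA_PER_TXD per descriptor. */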
+ len = min(skb_headlen(skb), total);
+ while (len) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+
+ tx_buffer_info->length = size;
+ tx_buffer_info->mapped_as_page = false;
+ tx_buffer_info->dma = pci_map_single(adapter->pdev,
+ skb->data + offset,
+ size, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ goto dma_error;
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ len -= size;
+ total -= size;
+ offset += size;
+ count++;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
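+	/* Now map each paged fragment with the same per-descriptor limit. */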
+ for (f = 0; f < nr_frags; f++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ len = min((unsigned int)frag->size, total);
+ offset = frag->page_offset;
+
+ while (len) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+
+ tx_buffer_info->length = size;
+ tx_buffer_info->dma = pci_map_page(adapter->pdev,
+ frag->page,
+ offset,
+ size,
+ PCI_DMA_TODEVICE);
+ tx_buffer_info->mapped_as_page = true;
+ if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ goto dma_error;
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ len -= size;
+ total -= size;
+ offset += size;
+ count++;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+ if (total == 0)
+ break;
+ }
+
+ if (i == 0)
+ i = tx_ring->count - 1;
+ else
+ i = i - 1;
+ tx_ring->tx_buffer_info[i].skb = skb;
+ tx_ring->tx_buffer_info[first].next_to_watch = i;
+
+ return count;
+
+dma_error:
+ dev_err(&pdev->dev, "TX DMA map failed\n");
+
+ /* clear timestamp and dma mappings for failed tx_buffer_info map */
+ tx_buffer_info->dma = 0;
+ tx_buffer_info->time_stamp = 0;
+ tx_buffer_info->next_to_watch = 0;
+ count--;
+
+ /* clear timestamp and dma mappings for remaining portion of packet */
+ while (count >= 0) {
+ count--;
+ i--;
+ if (i < 0)
+ i += tx_ring->count;
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ }
+
+ return count;
+}
+
+static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring, int tx_flags,
+ int count, u32 paylen, u8 hdr_len)
+{
+ union ixgbe_adv_tx_desc *tx_desc = NULL;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ u32 olinfo_status = 0, cmd_type_len = 0;
+ unsigned int i;
+
+ u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
+
+ cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
+
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+
+ if (tx_flags & IXGBE_TX_FLAGS_TSO) {
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+
+ olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+ IXGBE_ADVTXD_POPTS_SHIFT;
+
+ /* use index 1 context for tso */
+ olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+ if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+ olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
+ IXGBE_ADVTXD_POPTS_SHIFT;
+
+ } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+ olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+ IXGBE_ADVTXD_POPTS_SHIFT;
+
+ olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
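+	/* Emit one advanced data descriptor per mapped buffer; the EOP/RS
+	 * bits are OR'd into the last descriptor after the loop. */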
+ i = tx_ring->next_to_use;
+ while (count--) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
+ tx_desc->read.cmd_type_len =
+ cpu_to_le32(cmd_type_len | tx_buffer_info->length);
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+ tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ tx_ring->next_to_use = i;
+ writel(i, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
+ struct ixgbevf_ring *tx_ring, int size)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ netif_stop_subqueue(netdev, tx_ring->queue_index);
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it. */
+ smp_mb();
+
+	/* We need to check again in case another CPU has just
+ * made room available. */
+ if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(netdev, tx_ring->queue_index);
+ ++adapter->restart_queue;
+ return 0;
+}
+
+static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
+ struct ixgbevf_ring *tx_ring, int size)
+{
+ if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
+}
+
+static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring;
+ unsigned int first;
+ unsigned int tx_flags = 0;
+ u8 hdr_len = 0;
+ int r_idx = 0, tso;
+ int count = 0;
+
+ unsigned int f;
+
+ tx_ring = &adapter->tx_ring[r_idx];
+
+ if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+ tx_flags |= vlan_tx_tag_get(skb);
+ tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= IXGBE_TX_FLAGS_VLAN;
+ }
+
+ /* four things can cause us to need a context descriptor */
+ if (skb_is_gso(skb) ||
+ (skb->ip_summed == CHECKSUM_PARTIAL) ||
+ (tx_flags & IXGBE_TX_FLAGS_VLAN))
+ count++;
+
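+	/* Reserve enough descriptors for the linear data and every fragment. */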
+ count += TXD_USE_COUNT(skb_headlen(skb));
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+
+ if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
+ adapter->tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
+ first = tx_ring->next_to_use;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ tx_flags |= IXGBE_TX_FLAGS_IPV4;
+ tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+ if (tso < 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (tso)
+ tx_flags |= IXGBE_TX_FLAGS_TSO;
+ else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+ (skb->ip_summed == CHECKSUM_PARTIAL))
+ tx_flags |= IXGBE_TX_FLAGS_CSUM;
+
+ ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
+ ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
+ skb->len, hdr_len);
+
+ netdev->trans_start = jiffies;
+
+ ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ixgbevf_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *ixgbevf_get_stats(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ /* only return the current stats */
+ return &adapter->net_stats;
+}
+
+/**
+ * ixgbevf_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbevf_set_mac(struct net_device *netdev, void *p)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+ if (hw->mac.ops.set_rar)
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+
+ return 0;
+}
+
+/**
+ * ixgbevf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+ return -EINVAL;
+
+ hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ /* must set new MTU before calling down or up */
+ netdev->mtu = new_mtu;
+
+ if (netif_running(netdev))
+ ixgbevf_reinit_locked(adapter);
+
+ return 0;
+}
+
+static void ixgbevf_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ ixgbevf_down(adapter);
+ ixgbevf_free_irq(adapter);
+ ixgbevf_free_all_tx_resources(adapter);
+ ixgbevf_free_all_rx_resources(adapter);
+ }
+
+#ifdef CONFIG_PM
+ pci_save_state(pdev);
+#endif
+
+ pci_disable_device(pdev);
+}
+
+static const struct net_device_ops ixgbe_netdev_ops = {
+ .ndo_open = &ixgbevf_open,
+ .ndo_stop = &ixgbevf_close,
+ .ndo_start_xmit = &ixgbevf_xmit_frame,
+ .ndo_get_stats = &ixgbevf_get_stats,
+ .ndo_set_rx_mode = &ixgbevf_set_rx_mode,
+ .ndo_set_multicast_list = &ixgbevf_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = &ixgbevf_set_mac,
+ .ndo_change_mtu = &ixgbevf_change_mtu,
+ .ndo_tx_timeout = &ixgbevf_tx_timeout,
+ .ndo_vlan_rx_register = &ixgbevf_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = &ixgbevf_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = &ixgbevf_vlan_rx_kill_vid,
+};
+
+static void ixgbevf_assign_netdev_ops(struct net_device *dev)
+{
+ struct ixgbevf_adapter *adapter;
+ adapter = netdev_priv(dev);
+ dev->netdev_ops = &ixgbe_netdev_ops;
+ ixgbevf_set_ethtool_ops(dev);
+ dev->watchdog_timeo = 5 * HZ;
+}
+
+/**
+ * ixgbevf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ixgbevf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit ixgbevf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct ixgbevf_adapter *adapter = NULL;
+ struct ixgbe_hw *hw = NULL;
+ const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
+ static int cards_found;
+ int err, pci_using_dac;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ pci_using_dac = 1;
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ err = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA "
+ "configuration, aborting\n");
+ goto err_dma;
+ }
+ }
+ pci_using_dac = 0;
+ }
+
+ err = pci_request_regions(pdev, ixgbevf_driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
+ goto err_pci_reg;
+ }
+
+ pci_set_master(pdev);
+
+#ifdef HAVE_TX_MQ
+ netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
+ MAX_TX_QUEUES);
+#else
+ netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
+#endif
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ hw = &adapter->hw;
+ hw->back = adapter;
+ adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+
+ /*
+ * call save state here in standalone driver because it relies on
+ * adapter struct to exist, and needs to call netdev_priv
+ */
+ pci_save_state(pdev);
+
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->hw_addr) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ ixgbevf_assign_netdev_ops(netdev);
+
+ adapter->bd_number = cards_found;
+
+ /* Setup hw api */
+ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+ hw->mac.type = ii->mac;
+
+	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+	       sizeof(struct ixgbe_mbx_operations));
+
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+ adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
+
+ /* setup the private structure */
+ err = ixgbevf_sw_init(adapter);
+
+ ixgbevf_init_last_counter_stats(adapter);
+
+#ifdef MAX_SKB_FRAGS
+ netdev->features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
+ netdev->features |= NETIF_F_IPV6_CSUM;
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_IP_CSUM;
+ netdev->vlan_features |= NETIF_F_SG;
+
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+#endif /* MAX_SKB_FRAGS */
+
+ /* The HW MAC address was set and/or determined in sw_init */
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ printk(KERN_ERR "invalid MAC address\n");
+ err = -EIO;
+ goto err_sw_init;
+ }
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = &ixgbevf_watchdog;
+ adapter->watchdog_timer.data = (unsigned long)adapter;
+
+ INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
+ INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
+
+ err = ixgbevf_init_interrupt_scheme(adapter);
+ if (err)
+ goto err_sw_init;
+
+ /* pick up the PCI bus settings for reporting later */
+ if (hw->mac.ops.get_bus_info)
+ hw->mac.ops.get_bus_info(hw);
+
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ strcpy(netdev->name, "eth%d");
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ adapter->netdev_registered = true;
+
+ /* print the MAC address */
+ hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
+ netdev->dev_addr[0],
+ netdev->dev_addr[1],
+ netdev->dev_addr[2],
+ netdev->dev_addr[3],
+ netdev->dev_addr[4],
+ netdev->dev_addr[5]);
+
+ hw_dbg(hw, "MAC: %d\n", hw->mac.type);
+
+	hw_dbg(hw, "LRO is disabled\n");
+
+ hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
+ cards_found++;
+ return 0;
+
+err_register:
+err_sw_init:
+ ixgbevf_reset_interrupt_capability(adapter);
+ iounmap(hw->hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * ixgbevf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ixgbevf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void __devexit ixgbevf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+ del_timer_sync(&adapter->watchdog_timer);
+
+ cancel_work_sync(&adapter->watchdog_task);
+
+ flush_scheduled_work();
+
+ if (adapter->netdev_registered) {
+ unregister_netdev(netdev);
+ adapter->netdev_registered = false;
+ }
+
+ ixgbevf_reset_interrupt_capability(adapter);
+
+ iounmap(adapter->hw.hw_addr);
+ pci_release_regions(pdev);
+
+ hw_dbg(&adapter->hw, "Remove complete\n");
+
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+
+ free_netdev(netdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver ixgbevf_driver = {
+ .name = ixgbevf_driver_name,
+ .id_table = ixgbevf_pci_tbl,
+ .probe = ixgbevf_probe,
+ .remove = __devexit_p(ixgbevf_remove),
+ .shutdown = ixgbevf_shutdown,
+};
+
+/**
+ * ixgbevf_init_module - Driver Registration Routine
+ *
+ * ixgbevf_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init ixgbevf_init_module(void)
+{
+ int ret;
+ printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
+ ixgbevf_driver_version);
+
+ printk(KERN_INFO "%s\n", ixgbevf_copyright);
+
+ ret = pci_register_driver(&ixgbevf_driver);
+ return ret;
+}
+
+module_init(ixgbevf_init_module);
+
+/**
+ * ixgbevf_exit_module - Driver Exit Cleanup Routine
+ *
+ * ixgbevf_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit ixgbevf_exit_module(void)
+{
+ pci_unregister_driver(&ixgbevf_driver);
+}
+
+#ifdef DEBUG
+/**
+ * ixgbevf_get_hw_dev_name - return device name string
+ * used by hardware layer to print debugging information
+ **/
+char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
+{
+ struct ixgbevf_adapter *adapter = hw->back;
+ return adapter->netdev->name;
+}
+
+#endif
+module_exit(ixgbevf_exit_module);
+
+/* ixgbevf_main.c */
diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c
new file mode 100644
index 000000000000..b8143501e6fc
--- /dev/null
+++ b/drivers/net/ixgbevf/mbx.c
@@ -0,0 +1,341 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "mbx.h"
+
+/**
+ * ixgbevf_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if it successfully received a message notification
+ **/
+static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ while (countdown && mbx->ops.check_for_msg(hw)) {
+ countdown--;
+ udelay(mbx->udelay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbevf_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if it successfully received a message acknowledgement
+ **/
+static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ while (countdown && mbx->ops.check_for_ack(hw)) {
+ countdown--;
+ udelay(mbx->udelay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbevf_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ ret_val = ixgbevf_poll_for_msg(hw);
+
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = ixgbevf_poll_for_ack(hw);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_read_v2p_mailbox - read v2p mailbox
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to read the v2p mailbox without losing the read to
+ * clear status bits.
+ **/
+static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
+{
+ u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
+
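+	/* The R2C status bits clear on read, so merge in any bits cached
+	 * from earlier reads and remember the ones seen this time. */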
+ v2p_mailbox |= hw->mbx.v2p_mailbox;
+ hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
+
+ return v2p_mailbox;
+}
+
+/**
+ * ixgbevf_check_for_bit_vf - Determine if a status bit was set
+ * @hw: pointer to the HW structure
+ * @mask: bitmask for bits to be tested and cleared
+ *
+ * This function is used to check for the read to clear bits within
+ * the V2P mailbox.
+ **/
+static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw);
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (v2p_mailbox & mask)
+ ret_val = 0;
+
+ hw->mbx.v2p_mailbox &= ~mask;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_msg_vf - checks to see if the PF has sent mail
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_msg_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
+ ret_val = 0;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set the ACK bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
+ ret_val = 0;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_rst_vf - checks to see if the PF has reset
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set a reset indication or reset done bit,
+ * else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
+ IXGBE_VFMAILBOX_RSTI))) {
+ ret_val = 0;
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_obtain_mbx_lock_vf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * return 0 if we obtained the mailbox lock
+ **/
+static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+
+ /* reserve mailbox for vf use */
+ if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
+ ret_val = 0;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_mbx_vf - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied message into the buffer
+ **/
+static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbevf_check_for_msg_vf(hw);
+ ixgbevf_check_for_ack_vf(hw);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* Drop VFU and interrupt the PF to tell it a message has been sent */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+
+out_no_write:
+ return ret_val;
+}
+
+/**
+ * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully read a message from the buffer
+ **/
+static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ s32 ret_val = 0;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+ /* Acknowledge receipt and release mailbox, then we're done */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * ixgbevf_init_mbx_params_vf - set initial values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ /* start mailbox as timed out and let the reset_hw call set the timeout
+ * value to begin communications */
+ mbx->timeout = 0;
+ mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+
+ return 0;
+}
+
+struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
+ .init_params = ixgbevf_init_mbx_params_vf,
+ .read = ixgbevf_read_mbx_vf,
+ .write = ixgbevf_write_mbx_vf,
+ .read_posted = ixgbevf_read_posted_mbx,
+ .write_posted = ixgbevf_write_posted_mbx,
+ .check_for_msg = ixgbevf_check_for_msg_vf,
+ .check_for_ack = ixgbevf_check_for_ack_vf,
+ .check_for_rst = ixgbevf_check_for_rst_vf,
+};
+
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h
new file mode 100644
index 000000000000..1b0e0bf4c0f5
--- /dev/null
+++ b/drivers/net/ixgbevf/mbx.h
@@ -0,0 +1,100 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "vf.h"
+
+#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX -100
+
+#define IXGBE_VFMAILBOX 0x002FC
+#define IXGBE_VFMBMEM 0x00200
+
+/* Define mailbox register bits */
+#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
+#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
+#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
+#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
+#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
+#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
+
+#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is IXGBE_PF_*.
+ * Message ACK's are the value or'd with 0xF0000000
+ */
+#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ * clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET 0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD 3
+
+#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+/* forward declaration of the HW struct */
+struct ixgbe_hw;
+
+s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *);
+
+#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h
new file mode 100644
index 000000000000..12f75960aec1
--- /dev/null
+++ b/drivers/net/ixgbevf/regs.h
@@ -0,0 +1,85 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_REGS_H_
+#define _IXGBEVF_REGS_H_
+
+#define IXGBE_VFCTRL 0x00000
+#define IXGBE_VFSTATUS 0x00008
+#define IXGBE_VFLINKS 0x00010
+#define IXGBE_VFRTIMER 0x00048
+#define IXGBE_VFRXMEMWRAP 0x03190
+#define IXGBE_VTEICR 0x00100
+#define IXGBE_VTEICS 0x00104
+#define IXGBE_VTEIMS 0x00108
+#define IXGBE_VTEIMC 0x0010C
+#define IXGBE_VTEIAC 0x00110
+#define IXGBE_VTEIAM 0x00114
+#define IXGBE_VTEITR(x) (0x00820 + (4 * x))
+#define IXGBE_VTIVAR(x) (0x00120 + (4 * x))
+#define IXGBE_VTIVAR_MISC 0x00140
+#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x))
+#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x))
+#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x))
+#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x))
+#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x))
+#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x))
+#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x))
+#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x))
+#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x))
+#define IXGBE_VFPSRTYPE 0x00300
+#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x))
+#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x))
+#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x))
+#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x))
+#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x))
+#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x))
+#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x))
+#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x))
+#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x))
+#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x))
+#define IXGBE_VFGPRC 0x0101C
+#define IXGBE_VFGPTC 0x0201C
+#define IXGBE_VFGORC_LSB 0x01020
+#define IXGBE_VFGORC_MSB 0x01024
+#define IXGBE_VFGOTC_LSB 0x02020
+#define IXGBE_VFGOTC_MSB 0x02024
+#define IXGBE_VFMPRC 0x01034
+
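+/* MMIO accessors over the VF's BAR 0 mapping (hw->hw_addr). */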
+#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+
+#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
+
+#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+ writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
+
+#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
+ readl((a)->hw_addr + (reg) + ((offset) << 2)))
+
+#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
+
+#endif /* _IXGBEVF_REGS_H_ */
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
new file mode 100644
index 000000000000..4b5dec0ec140
--- /dev/null
+++ b/drivers/net/ixgbevf/vf.c
@@ -0,0 +1,387 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "vf.h"
+
+/**
+ * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by filling the bus info structure and media type, clears
+ * all on chip counters, initializes receive address registers, multicast
+ * table, VLAN filter table, calls routine to set up link and flow control
+ * settings, and leaves transmit and receive units disabled and uninitialized
+ **/
+static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
+{
+ /* Clear adapter stopped flag */
+ hw->adapter_stopped = false;
+
+ return 0;
+}
+
+/**
+ * ixgbevf_init_hw_vf - virtual function hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting the hardware and then starting
+ * the hardware
+ **/
+static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
+{
+ s32 status = hw->mac.ops.start_hw(hw);
+
+ hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+ return status;
+}
+
+/**
+ * ixgbevf_reset_hw_vf - Performs hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts.
+ **/
+static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 timeout = IXGBE_VF_INIT_TIMEOUT;
+ s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
+ u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ hw->mac.ops.stop_adapter(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* we cannot reset while the RSTI / RSTD bits are asserted */
+ while (!mbx->ops.check_for_rst(hw) && timeout) {
+ timeout--;
+ udelay(5);
+ }
+
+ if (!timeout)
+ return IXGBE_ERR_RESET_FAILED;
+
+ /* mailbox timeout can now become active */
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+
+ msgbuf[0] = IXGBE_VF_RESET;
+ mbx->ops.write_posted(hw, msgbuf, 1);
+
+ msleep(10);
+
+ /* set our "perm_addr" based on info provided by PF */
+ /* also set up the mc_filter_type, which is piggybacked
+ * on the MAC address in word 3 */
+ ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
+ if (ret_val)
+ return ret_val;
+
+ if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
+ return IXGBE_ERR_INVALID_MAC_ADDR;
+
+ memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
+
+ return 0;
+}
+
+/**
+ * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
+{
+ u32 number_of_queues;
+ u32 reg_val;
+ u16 i;
+
+ /*
+ * Set the adapter_stopped flag so other driver functions stop touching
+ * the hardware
+ */
+ hw->adapter_stopped = true;
+
+ /* Disable the receive unit by stopping each queue */
+ number_of_queues = hw->mac.max_rx_queues;
+ for (i = 0; i < number_of_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ if (reg_val & IXGBE_RXDCTL_ENABLE) {
+ reg_val &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
+ }
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Clear interrupt mask to stop interrupts from being generated */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+ /* Clear any pending interrupts */
+ IXGBE_READ_REG(hw, IXGBE_VTEICR);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ number_of_queues = hw->mac.max_tx_queues;
+ for (i = 0; i < number_of_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ if (reg_val & IXGBE_TXDCTL_ENABLE) {
+ reg_val &= ~IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbevf_mta_vector - Determines bit-vector in multicast table to set
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts 12 bits from a multicast address to determine which
+ * bit-vector to set in the multicast table. The hardware uses 12 bits of
+ * incoming rx multicast addresses to determine the bit-vector to check in
+ * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
+ * by the MO field of the MCSTCTRL. The MO field is set during initialization
+ * to mc_filter_type.
+ **/
+static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector = 0;
+
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ break;
+ }
+
+ /* vector can only be 12 bits or the boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
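Worked example (illustration only): with mc_filter_type 0 and the mDNS group address 01:00:5e:00:00:fb, mc_addr[4] is 0x00 and mc_addr[5] is 0xfb, so case 0 yields (0x00 >> 4) | (0xfb << 4) = 0xfb0, which survives the 12-bit mask unchanged. The split below shows how such a vector is conventionally mapped onto the 128 x 32-bit MTA registers; on the VF side the vector is only reported to the PF over the mailbox, so this sketches the consumer, not this driver:

	u32 vector = 0xfb0;			/* as returned by ixgbevf_mta_vector() */
	u32 mta_reg = (vector >> 5) & 0x7F;	/* which 32-bit MTA register */
	u32 mta_bit = vector & 0x1F;		/* which bit within that register */
	/* the PF would then set bit mta_bit in MTA[mta_reg] */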
+
+/**
+ * ixgbevf_get_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to storage for retrieved MAC address
+ **/
+static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+ memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+
+ return 0;
+}
+
+/**
+ * ixgbevf_set_rar_vf - set device MAC address
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: Unused in this implementation
+ **/
+static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
+ u32 vmdq)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[3];
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+
+ memset(msgbuf, 0, sizeof(msgbuf));
+ msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
+ memcpy(msg_addr, addr, 6);
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /* if the message was NACKed, the address was rejected; use "perm_addr" */
+ if (!ret_val &&
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
+ ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ * @next: caller supplied function to return next address in list
+ *
+ * Updates the Multicast Table Array.
+ **/
+static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count,
+ ixgbe_mc_addr_itr next)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+ u16 *vector_list = (u16 *)&msgbuf[1];
+ u32 vector;
+ u32 cnt, i;
+ u32 vmdq;
+
+ /* Each entry in the list uses one 16-bit word. We have 30
+ * 16-bit words available in our HW msg buffer (minus 1 for the
+ * msg type). That's 30 hash values if we pack them right. If
+ * there are more than 30 MC addresses to add, punt the
+ * extras for now and add code to handle more than 30 later.
+ * It would be unusual for a server to request that many multicast
+ * addresses except in large enterprise network environments.
+ */
+
+ cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
+ msgbuf[0] = IXGBE_VF_SET_MULTICAST;
+ msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
+
+ for (i = 0; i < cnt; i++) {
+ vector = ixgbevf_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
+ vector_list[i] = vector;
+ }
+
+ mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
+
+ return 0;
+}
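The "next" callback must match the ixgbe_mc_addr_itr typedef declared in vf.h: it returns the current 6-byte address and advances the caller's cursor. A hypothetical iterator over a flat, packed array of addresses (names and layout are illustrative, not part of the patch):

static u8 *example_flat_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
{
	u8 *addr = *mc_addr_ptr;

	*vmdq = 0;			/* VMDq pool is not used on the VF side */
	*mc_addr_ptr = addr + 6;	/* step to the next packed address */
	return addr;
}

A caller would then pass the packed array and the iterator straight through the ops table, e.g. hw->mac.ops.update_mc_addr_list(hw, mc_array, count, example_flat_list_itr).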
+
+/**
+ * ixgbevf_set_vfta_vf - Set/Unset vlan filter table address
+ * @hw: pointer to the HW structure
+ * @vlan: 12 bit VLAN ID
+ * @vind: unused by VF drivers
+ * @vlan_on: if true then set bit, else clear bit
+ **/
+static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[2];
+
+ msgbuf[0] = IXGBE_VF_SET_VLAN;
+ msgbuf[1] = vlan;
+ /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+ msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
+
+ return mbx->ops.write_posted(hw, msgbuf, 2);
+}
+
+/**
+ * ixgbevf_setup_mac_link_vf - Setup MAC link settings
+ * @hw: pointer to hardware structure
+ * @speed: Unused in this implementation
+ * @autoneg: Unused in this implementation
+ * @autoneg_wait_to_complete: Unused in this implementation
+ *
+ * Do nothing and return success. VF drivers are not allowed to change
+ * global settings. Maintained for driver compatibility.
+ **/
+static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ return 0;
+}
+
+/**
+ * ixgbevf_check_mac_link_vf - Get link/speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up,
+ bool autoneg_wait_to_complete)
+{
+ u32 links_reg;
+
+ if (!(hw->mbx.ops.check_for_rst(hw))) {
+ *link_up = false;
+ *speed = 0;
+ return -1;
+ }
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = true;
+ else
+ *link_up = false;
+
+ if (links_reg & IXGBE_LINKS_SPEED)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ return 0;
+}
+
+struct ixgbe_mac_operations ixgbevf_mac_ops = {
+ .init_hw = ixgbevf_init_hw_vf,
+ .reset_hw = ixgbevf_reset_hw_vf,
+ .start_hw = ixgbevf_start_hw_vf,
+ .get_mac_addr = ixgbevf_get_mac_addr_vf,
+ .stop_adapter = ixgbevf_stop_hw_vf,
+ .setup_link = ixgbevf_setup_mac_link_vf,
+ .check_link = ixgbevf_check_mac_link_vf,
+ .set_rar = ixgbevf_set_rar_vf,
+ .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
+ .set_vfta = ixgbevf_set_vfta_vf,
+};
+
+struct ixgbevf_info ixgbevf_vf_info = {
+ .mac = ixgbe_mac_82599_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
+
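Illustration only: the exported ixgbevf_mac_ops table above is what the rest of the VF driver calls through. A hedged sketch of a link-status query, assuming hw->mac.ops has already been copied from ixgbevf_mac_ops during probe:

	ixgbe_link_speed speed;
	bool link_up;

	if (!hw->mac.ops.check_link(hw, &speed, &link_up, false) && link_up)
		pr_info("VF link up at %s\n",
			speed == IXGBE_LINK_SPEED_10GB_FULL ? "10 Gb/s" : "1 Gb/s");
	else
		pr_info("VF link down (or a PF reset is pending)\n");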
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
new file mode 100644
index 000000000000..799600e92700
--- /dev/null
+++ b/drivers/net/ixgbevf/vf.h
@@ -0,0 +1,168 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef __IXGBE_VF_H__
+#define __IXGBE_VF_H__
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+
+#include "defines.h"
+#include "regs.h"
+#include "mbx.h"
+
+struct ixgbe_hw;
+
+/* iterator type for walking multicast address lists */
+typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+ u32 *vmdq);
+struct ixgbe_mac_operations {
+ s32 (*init_hw)(struct ixgbe_hw *);
+ s32 (*reset_hw)(struct ixgbe_hw *);
+ s32 (*start_hw)(struct ixgbe_hw *);
+ s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+ enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+ u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
+ s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+ s32 (*stop_adapter)(struct ixgbe_hw *);
+ s32 (*get_bus_info)(struct ixgbe_hw *);
+
+ /* Link */
+ s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
+ s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+ s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+ bool *);
+
+ /* RAR, Multicast, VLAN */
+ s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
+ s32 (*init_rx_addrs)(struct ixgbe_hw *);
+ s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+ ixgbe_mc_addr_itr);
+ s32 (*enable_mc)(struct ixgbe_hw *);
+ s32 (*disable_mc)(struct ixgbe_hw *);
+ s32 (*clear_vfta)(struct ixgbe_hw *);
+ s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+};
+
+enum ixgbe_mac_type {
+ ixgbe_mac_unknown = 0,
+ ixgbe_mac_82599_vf,
+ ixgbe_num_macs
+};
+
+struct ixgbe_mac_info {
+ struct ixgbe_mac_operations ops;
+ u8 addr[6];
+ u8 perm_addr[6];
+
+ enum ixgbe_mac_type type;
+
+ s32 mc_filter_type;
+
+ bool get_link_status;
+ u32 max_tx_queues;
+ u32 max_rx_queues;
+ u32 max_msix_vectors;
+};
+
+struct ixgbe_mbx_operations {
+ s32 (*init_params)(struct ixgbe_hw *hw);
+ s32 (*read)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*write)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*check_for_msg)(struct ixgbe_hw *);
+ s32 (*check_for_ack)(struct ixgbe_hw *);
+ s32 (*check_for_rst)(struct ixgbe_hw *);
+};
+
+struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+ struct ixgbe_mbx_operations ops;
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 udelay;
+ u32 v2p_mailbox;
+ u16 size;
+};
+
+struct ixgbe_hw {
+ void *back;
+
+ u8 __iomem *hw_addr;
+ u8 *flash_address;
+ unsigned long io_base;
+
+ struct ixgbe_mac_info mac;
+ struct ixgbe_mbx_info mbx;
+
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 vendor_id;
+
+ u8 revision_id;
+ bool adapter_stopped;
+};
+
+struct ixgbevf_hw_stats {
+ u64 base_vfgprc;
+ u64 base_vfgptc;
+ u64 base_vfgorc;
+ u64 base_vfgotc;
+ u64 base_vfmprc;
+
+ u64 last_vfgprc;
+ u64 last_vfgptc;
+ u64 last_vfgorc;
+ u64 last_vfgotc;
+ u64 last_vfmprc;
+
+ u64 vfgprc;
+ u64 vfgptc;
+ u64 vfgorc;
+ u64 vfgotc;
+ u64 vfmprc;
+};
+
+struct ixgbevf_info {
+ enum ixgbe_mac_type mac;
+ struct ixgbe_mac_operations *mac_ops;
+};
+
+#endif /* __IXGBE_VF_H__ */
+
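Illustration only: the base_/last_/running triplets in struct ixgbevf_hw_stats exist because the octet counters exposed through IXGBE_VFGORC_LSB/MSB and IXGBE_VFGOTC_LSB/MSB are split across two registers and wrap. A hedged sketch of folding one such counter into the running total (the 0xF mask and the 2^36 rollover constant are assumptions based on the LSB/MSB register split):

static void example_update_vfgorc(struct ixgbe_hw *hw, struct ixgbevf_hw_stats *s)
{
	u64 cur = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB) |
		  ((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB) & 0xF) << 32);
	u64 delta;

	if (cur >= s->last_vfgorc)
		delta = cur - s->last_vfgorc;
	else				/* the 36-bit hardware counter wrapped */
		delta = cur + 0x1000000000ULL - s->last_vfgorc;

	s->vfgorc += delta;
	s->last_vfgorc = cur;
}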
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 792b88fc3574..26eed49d3208 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -2994,7 +2994,7 @@ jme_resume(struct pci_dev *pdev)
}
#endif
-static struct pci_device_id jme_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = {
{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
{ }
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
index c146304d8d6c..c0ceebccaa49 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -854,8 +854,8 @@ static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
static irqreturn_t ks_irq(int irq, void *pw)
{
- struct ks_net *ks = pw;
- struct net_device *netdev = ks->netdev;
+ struct net_device *netdev = pw;
+ struct ks_net *ks = netdev_priv(netdev);
u16 status;
/*this should be the first in IRQ handler */
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 336e7c7a9275..a8522bd73ae7 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -134,7 +134,7 @@ static int temac_dma_bd_init(struct net_device *ndev)
struct sk_buff *skb;
int i;
- lp->rx_skb = kzalloc(sizeof(struct sk_buff)*RX_BD_NUM, GFP_KERNEL);
+ lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual addres and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index f8fa0c3f0f64..a8768672dc5a 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -17,6 +17,8 @@
/* 2002-12-30: Try to support more cards, some clues from NetBSD driver */
/* 2003-12-26: Make sure Asante cards always work. */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -34,31 +36,36 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
+#include <linux/io.h>
#include <asm/system.h>
-#include <asm/io.h>
#include <asm/dma.h>
#include <asm/hwtest.h>
#include <asm/macints.h>
static char version[] =
- "mac8390.c: v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n";
+ "v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n";
#define EI_SHIFT(x) (ei_local->reg_offset[x])
-#define ei_inb(port) in_8(port)
-#define ei_outb(val,port) out_8(port,val)
-#define ei_inb_p(port) in_8(port)
-#define ei_outb_p(val,port) out_8(port,val)
+#define ei_inb(port) in_8(port)
+#define ei_outb(val, port) out_8(port, val)
+#define ei_inb_p(port) in_8(port)
+#define ei_outb_p(val, port) out_8(port, val)
#include "lib8390.c"
#define WD_START_PG 0x00 /* First page of TX buffer */
#define CABLETRON_RX_START_PG 0x00 /* First page of RX buffer */
#define CABLETRON_RX_STOP_PG 0x30 /* Last page +1 of RX ring */
-#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG /* First page of TX buffer */
+#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG
+ /* First page of TX buffer */
-/* Unfortunately it seems we have to hardcode these for the moment */
-/* Shouldn't the card know about this? Does anyone know where to read it off the card? Do we trust the data provided by the card? */
+/*
+ * Unfortunately it seems we have to hardcode these for the moment
+ * Shouldn't the card know about this?
+ * Does anyone know where to read it off the card?
+ * Do we trust the data provided by the card?
+ */
#define DAYNA_8390_BASE 0x80000
#define DAYNA_8390_MEM 0x00000
@@ -80,7 +87,7 @@ enum mac8390_type {
MAC8390_KINETICS,
};
-static const char * cardname[] = {
+static const char *cardname[] = {
"apple",
"asante",
"farallon",
@@ -90,7 +97,7 @@ static const char * cardname[] = {
"kinetics",
};
-static int word16[] = {
+static const int word16[] = {
1, /* apple */
1, /* asante */
1, /* farallon */
@@ -101,7 +108,7 @@ static int word16[] = {
};
/* on which cards do we use NuBus resources? */
-static int useresources[] = {
+static const int useresources[] = {
1, /* apple */
1, /* asante */
1, /* farallon */
@@ -117,22 +124,22 @@ enum mac8390_access {
ACCESS_16,
};
-extern int mac8390_memtest(struct net_device * dev);
-static int mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev,
+extern int mac8390_memtest(struct net_device *dev);
+static int mac8390_initdev(struct net_device *dev, struct nubus_dev *ndev,
enum mac8390_type type);
-static int mac8390_open(struct net_device * dev);
-static int mac8390_close(struct net_device * dev);
+static int mac8390_open(struct net_device *dev);
+static int mac8390_close(struct net_device *dev);
static void mac8390_no_reset(struct net_device *dev);
static void interlan_reset(struct net_device *dev);
/* Sane (32-bit chunk memory read/write) - Some Farallon and Apple do this*/
static void sane_get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr, int ring_page);
-static void sane_block_input(struct net_device * dev, int count,
- struct sk_buff * skb, int ring_offset);
-static void sane_block_output(struct net_device * dev, int count,
- const unsigned char * buf, const int start_page);
+static void sane_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void sane_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page);
/* dayna_memcpy to and from card */
static void dayna_memcpy_fromcard(struct net_device *dev, void *to,
@@ -148,8 +155,8 @@ static void dayna_block_input(struct net_device *dev, int count,
static void dayna_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
-#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
-#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
+#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
+#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
static void slow_sane_get_8390_hdr(struct net_device *dev,
@@ -164,70 +171,72 @@ static void word_memcpy_fromcard(void *tp, const void *fp, int count);
static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
{
switch (dev->dr_sw) {
- case NUBUS_DRSW_3COM:
- switch (dev->dr_hw) {
- case NUBUS_DRHW_APPLE_SONIC_NB:
- case NUBUS_DRHW_APPLE_SONIC_LC:
- case NUBUS_DRHW_SONNET:
- return MAC8390_NONE;
- break;
- default:
- return MAC8390_APPLE;
- break;
- }
+ case NUBUS_DRSW_3COM:
+ switch (dev->dr_hw) {
+ case NUBUS_DRHW_APPLE_SONIC_NB:
+ case NUBUS_DRHW_APPLE_SONIC_LC:
+ case NUBUS_DRHW_SONNET:
+ return MAC8390_NONE;
break;
-
- case NUBUS_DRSW_APPLE:
- switch (dev->dr_hw) {
- case NUBUS_DRHW_ASANTE_LC:
- return MAC8390_NONE;
- break;
- case NUBUS_DRHW_CABLETRON:
- return MAC8390_CABLETRON;
- break;
- default:
- return MAC8390_APPLE;
- break;
- }
+ default:
+ return MAC8390_APPLE;
break;
+ }
+ break;
- case NUBUS_DRSW_ASANTE:
- return MAC8390_ASANTE;
+ case NUBUS_DRSW_APPLE:
+ switch (dev->dr_hw) {
+ case NUBUS_DRHW_ASANTE_LC:
+ return MAC8390_NONE;
break;
-
- case NUBUS_DRSW_TECHWORKS:
- case NUBUS_DRSW_DAYNA2:
- case NUBUS_DRSW_DAYNA_LC:
- if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
- return MAC8390_CABLETRON;
- else
- return MAC8390_APPLE;
+ case NUBUS_DRHW_CABLETRON:
+ return MAC8390_CABLETRON;
break;
-
- case NUBUS_DRSW_FARALLON:
- return MAC8390_FARALLON;
+ default:
+ return MAC8390_APPLE;
break;
+ }
+ break;
- case NUBUS_DRSW_KINETICS:
- switch (dev->dr_hw) {
- case NUBUS_DRHW_INTERLAN:
- return MAC8390_INTERLAN;
- break;
- default:
- return MAC8390_KINETICS;
- break;
- }
- break;
+ case NUBUS_DRSW_ASANTE:
+ return MAC8390_ASANTE;
+ break;
- case NUBUS_DRSW_DAYNA:
- // These correspond to Dayna Sonic cards
- // which use the macsonic driver
- if (dev->dr_hw == NUBUS_DRHW_SMC9194 ||
- dev->dr_hw == NUBUS_DRHW_INTERLAN )
- return MAC8390_NONE;
- else
- return MAC8390_DAYNA;
+ case NUBUS_DRSW_TECHWORKS:
+ case NUBUS_DRSW_DAYNA2:
+ case NUBUS_DRSW_DAYNA_LC:
+ if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
+ return MAC8390_CABLETRON;
+ else
+ return MAC8390_APPLE;
+ break;
+
+ case NUBUS_DRSW_FARALLON:
+ return MAC8390_FARALLON;
+ break;
+
+ case NUBUS_DRSW_KINETICS:
+ switch (dev->dr_hw) {
+ case NUBUS_DRHW_INTERLAN:
+ return MAC8390_INTERLAN;
+ break;
+ default:
+ return MAC8390_KINETICS;
break;
+ }
+ break;
+
+ case NUBUS_DRSW_DAYNA:
+ /*
+ * These correspond to Dayna Sonic cards
+ * which use the macsonic driver
+ */
+ if (dev->dr_hw == NUBUS_DRHW_SMC9194 ||
+ dev->dr_hw == NUBUS_DRHW_INTERLAN)
+ return MAC8390_NONE;
+ else
+ return MAC8390_DAYNA;
+ break;
}
return MAC8390_NONE;
}
@@ -237,14 +246,14 @@ static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
unsigned long outdata = 0xA5A0B5B0;
unsigned long indata = 0x00000000;
/* Try writing 32 bits */
- memcpy((char *)membase, (char *)&outdata, 4);
+ memcpy(membase, &outdata, 4);
/* Now compare them */
if (memcmp((char *)&outdata, (char *)membase, 4) == 0)
return ACCESS_32;
/* Write 16 bit output */
- word_memcpy_tocard((char *)membase, (char *)&outdata, 4);
+ word_memcpy_tocard(membase, &outdata, 4);
/* Now read it back */
- word_memcpy_fromcard((char *)&indata, (char *)membase, 4);
+ word_memcpy_fromcard(&indata, membase, 4);
if (outdata == indata)
return ACCESS_16;
return ACCESS_UNKNOWN;
@@ -258,7 +267,7 @@ static int __init mac8390_memsize(unsigned long membase)
local_irq_save(flags);
/* Check up to 32K in 4K increments */
for (i = 0; i < 8; i++) {
- volatile unsigned short *m = (unsigned short *) (membase + (i * 0x1000));
+ volatile unsigned short *m = (unsigned short *)(membase + (i * 0x1000));
/* Unwriteable - we have a fully decoded card and the
RAM end located */
@@ -273,28 +282,127 @@ static int __init mac8390_memsize(unsigned long membase)
/* check for partial decode and wrap */
for (j = 0; j < i; j++) {
- volatile unsigned short *p = (unsigned short *) (membase + (j * 0x1000));
+ volatile unsigned short *p = (unsigned short *)(membase + (j * 0x1000));
if (*p != (0xA5A0 | j))
break;
- }
- }
+ }
+ }
local_irq_restore(flags);
- /* in any case, we stopped once we tried one block too many,
- or once we reached 32K */
- return i * 0x1000;
+ /*
+ * in any case, we stopped once we tried one block too many,
+ * or once we reached 32K
+ */
+ return i * 0x1000;
+}
+
+static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev,
+ enum mac8390_type cardtype)
+{
+ struct nubus_dir dir;
+ struct nubus_dirent ent;
+ int offset;
+ volatile unsigned short *i;
+
+ printk_once(KERN_INFO pr_fmt("%s"), version);
+
+ dev->irq = SLOT2IRQ(ndev->board->slot);
+ /* This is getting to be a habit */
+ dev->base_addr = (ndev->board->slot_addr |
+ ((ndev->board->slot & 0xf) << 20));
+
+ /*
+ * Get some Nubus info - we will trust the card's idea
+ * of where its memory and registers are.
+ */
+
+ if (nubus_get_func_dir(ndev, &dir) == -1) {
+ pr_err("%s: Unable to get Nubus functional directory for slot %X!\n",
+ dev->name, ndev->board->slot);
+ return false;
+ }
+
+ /* Get the MAC address */
+ if (nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent) == -1) {
+ pr_info("%s: Couldn't get MAC address!\n", dev->name);
+ return false;
+ }
+
+ nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
+
+ if (useresources[cardtype] == 1) {
+ nubus_rewinddir(&dir);
+ if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS,
+ &ent) == -1) {
+ pr_err("%s: Memory offset resource for slot %X not found!\n",
+ dev->name, ndev->board->slot);
+ return false;
+ }
+ nubus_get_rsrc_mem(&offset, &ent, 4);
+ dev->mem_start = dev->base_addr + offset;
+ /* yes, this is how the Apple driver does it */
+ dev->base_addr = dev->mem_start + 0x10000;
+ nubus_rewinddir(&dir);
+ if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH,
+ &ent) == -1) {
+ pr_info("%s: Memory length resource for slot %X not found, probing\n",
+ dev->name, ndev->board->slot);
+ offset = mac8390_memsize(dev->mem_start);
+ } else {
+ nubus_get_rsrc_mem(&offset, &ent, 4);
+ }
+ dev->mem_end = dev->mem_start + offset;
+ } else {
+ switch (cardtype) {
+ case MAC8390_KINETICS:
+ case MAC8390_DAYNA: /* it's the same */
+ dev->base_addr = (int)(ndev->board->slot_addr +
+ DAYNA_8390_BASE);
+ dev->mem_start = (int)(ndev->board->slot_addr +
+ DAYNA_8390_MEM);
+ dev->mem_end = dev->mem_start +
+ mac8390_memsize(dev->mem_start);
+ break;
+ case MAC8390_INTERLAN:
+ dev->base_addr = (int)(ndev->board->slot_addr +
+ INTERLAN_8390_BASE);
+ dev->mem_start = (int)(ndev->board->slot_addr +
+ INTERLAN_8390_MEM);
+ dev->mem_end = dev->mem_start +
+ mac8390_memsize(dev->mem_start);
+ break;
+ case MAC8390_CABLETRON:
+ dev->base_addr = (int)(ndev->board->slot_addr +
+ CABLETRON_8390_BASE);
+ dev->mem_start = (int)(ndev->board->slot_addr +
+ CABLETRON_8390_MEM);
+ /* The base address is unreadable if 0x00
+ * has been written to the command register.
+ * Reset the chip by writing E8390_NODMA +
+ * E8390_PAGE0 + E8390_STOP just to be
+ * sure.
+ */
+ i = (void *)dev->base_addr;
+ *i = 0x21;
+ dev->mem_end = dev->mem_start +
+ mac8390_memsize(dev->mem_start);
+ break;
+
+ default:
+ pr_err("Card type %s is unsupported, sorry\n",
+ ndev->board->name);
+ return false;
+ }
+ }
+
+ return true;
}
struct net_device * __init mac8390_probe(int unit)
{
struct net_device *dev;
- volatile unsigned short *i;
- int version_disp = 0;
- struct nubus_dev * ndev = NULL;
+ struct nubus_dev *ndev = NULL;
int err = -ENODEV;
- struct nubus_dir dir;
- struct nubus_dirent ent;
- int offset;
static unsigned int slots;
enum mac8390_type cardtype;
@@ -311,118 +419,19 @@ struct net_device * __init mac8390_probe(int unit)
if (unit >= 0)
sprintf(dev->name, "eth%d", unit);
- while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET, ndev))) {
+ while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET,
+ ndev))) {
/* Have we seen it already? */
- if (slots & (1<<ndev->board->slot))
+ if (slots & (1 << ndev->board->slot))
continue;
- slots |= 1<<ndev->board->slot;
+ slots |= 1 << ndev->board->slot;
- if ((cardtype = mac8390_ident(ndev)) == MAC8390_NONE)
+ cardtype = mac8390_ident(ndev);
+ if (cardtype == MAC8390_NONE)
continue;
- if (version_disp == 0) {
- version_disp = 1;
- printk(version);
- }
-
- dev->irq = SLOT2IRQ(ndev->board->slot);
- /* This is getting to be a habit */
- dev->base_addr = ndev->board->slot_addr | ((ndev->board->slot&0xf) << 20);
-
- /* Get some Nubus info - we will trust the card's idea
- of where its memory and registers are. */
-
- if (nubus_get_func_dir(ndev, &dir) == -1) {
- printk(KERN_ERR "%s: Unable to get Nubus functional"
- " directory for slot %X!\n",
- dev->name, ndev->board->slot);
+ if (!mac8390_init(dev, ndev, cardtype))
continue;
- }
-
- /* Get the MAC address */
- if ((nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent)) == -1) {
- printk(KERN_INFO "%s: Couldn't get MAC address!\n",
- dev->name);
- continue;
- } else {
- nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
- }
-
- if (useresources[cardtype] == 1) {
- nubus_rewinddir(&dir);
- if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS, &ent) == -1) {
- printk(KERN_ERR "%s: Memory offset resource"
- " for slot %X not found!\n",
- dev->name, ndev->board->slot);
- continue;
- }
- nubus_get_rsrc_mem(&offset, &ent, 4);
- dev->mem_start = dev->base_addr + offset;
- /* yes, this is how the Apple driver does it */
- dev->base_addr = dev->mem_start + 0x10000;
- nubus_rewinddir(&dir);
- if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH, &ent) == -1) {
- printk(KERN_INFO "%s: Memory length resource"
- " for slot %X not found"
- ", probing\n",
- dev->name, ndev->board->slot);
- offset = mac8390_memsize(dev->mem_start);
- } else {
- nubus_get_rsrc_mem(&offset, &ent, 4);
- }
- dev->mem_end = dev->mem_start + offset;
- } else {
- switch (cardtype) {
- case MAC8390_KINETICS:
- case MAC8390_DAYNA: /* it's the same */
- dev->base_addr =
- (int)(ndev->board->slot_addr +
- DAYNA_8390_BASE);
- dev->mem_start =
- (int)(ndev->board->slot_addr +
- DAYNA_8390_MEM);
- dev->mem_end =
- dev->mem_start +
- mac8390_memsize(dev->mem_start);
- break;
- case MAC8390_INTERLAN:
- dev->base_addr =
- (int)(ndev->board->slot_addr +
- INTERLAN_8390_BASE);
- dev->mem_start =
- (int)(ndev->board->slot_addr +
- INTERLAN_8390_MEM);
- dev->mem_end =
- dev->mem_start +
- mac8390_memsize(dev->mem_start);
- break;
- case MAC8390_CABLETRON:
- dev->base_addr =
- (int)(ndev->board->slot_addr +
- CABLETRON_8390_BASE);
- dev->mem_start =
- (int)(ndev->board->slot_addr +
- CABLETRON_8390_MEM);
- /* The base address is unreadable if 0x00
- * has been written to the command register
- * Reset the chip by writing E8390_NODMA +
- * E8390_PAGE0 + E8390_STOP just to be
- * sure
- */
- i = (void *)dev->base_addr;
- *i = 0x21;
- dev->mem_end =
- dev->mem_start +
- mac8390_memsize(dev->mem_start);
- break;
-
- default:
- printk(KERN_ERR "Card type %s is"
- " unsupported, sorry\n",
- ndev->board->name);
- continue;
- }
- }
/* Do the nasty 8390 stuff */
if (!mac8390_initdev(dev, ndev, cardtype))
@@ -458,7 +467,7 @@ int init_module(void)
dev_mac890[i] = dev;
}
if (!i) {
- printk(KERN_NOTICE "mac8390.c: No useable cards found, driver NOT installed.\n");
+ pr_notice("No useable cards found, driver NOT installed.\n");
return -ENODEV;
}
return 0;
@@ -493,22 +502,23 @@ static const struct net_device_ops mac8390_netdev_ops = {
#endif
};
-static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev,
- enum mac8390_type type)
+static int __init mac8390_initdev(struct net_device *dev,
+ struct nubus_dev *ndev,
+ enum mac8390_type type)
{
- static u32 fwrd4_offsets[16]={
+ static u32 fwrd4_offsets[16] = {
0, 4, 8, 12,
16, 20, 24, 28,
32, 36, 40, 44,
48, 52, 56, 60
};
- static u32 back4_offsets[16]={
+ static u32 back4_offsets[16] = {
60, 56, 52, 48,
44, 40, 36, 32,
28, 24, 20, 16,
12, 8, 4, 0
};
- static u32 fwrd2_offsets[16]={
+ static u32 fwrd2_offsets[16] = {
0, 2, 4, 6,
8, 10, 12, 14,
16, 18, 20, 22,
@@ -526,47 +536,47 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
/* Cabletron's TX/RX buffers are backwards */
if (type == MAC8390_CABLETRON) {
- ei_status.tx_start_page = CABLETRON_TX_START_PG;
- ei_status.rx_start_page = CABLETRON_RX_START_PG;
- ei_status.stop_page = CABLETRON_RX_STOP_PG;
- ei_status.rmem_start = dev->mem_start;
- ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256;
+ ei_status.tx_start_page = CABLETRON_TX_START_PG;
+ ei_status.rx_start_page = CABLETRON_RX_START_PG;
+ ei_status.stop_page = CABLETRON_RX_STOP_PG;
+ ei_status.rmem_start = dev->mem_start;
+ ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256;
} else {
- ei_status.tx_start_page = WD_START_PG;
- ei_status.rx_start_page = WD_START_PG + TX_PAGES;
- ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
- ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
- ei_status.rmem_end = dev->mem_end;
+ ei_status.tx_start_page = WD_START_PG;
+ ei_status.rx_start_page = WD_START_PG + TX_PAGES;
+ ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
+ ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
+ ei_status.rmem_end = dev->mem_end;
}
/* Fill in model-specific information and functions */
- switch(type) {
+ switch (type) {
case MAC8390_FARALLON:
case MAC8390_APPLE:
- switch(mac8390_testio(dev->mem_start)) {
- case ACCESS_UNKNOWN:
- printk("Don't know how to access card memory!\n");
- return -ENODEV;
- break;
+ switch (mac8390_testio(dev->mem_start)) {
+ case ACCESS_UNKNOWN:
+ pr_info("Don't know how to access card memory!\n");
+ return -ENODEV;
+ break;
- case ACCESS_16:
- /* 16 bit card, register map is reversed */
- ei_status.reset_8390 = &mac8390_no_reset;
- ei_status.block_input = &slow_sane_block_input;
- ei_status.block_output = &slow_sane_block_output;
- ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
- ei_status.reg_offset = back4_offsets;
- break;
+ case ACCESS_16:
+ /* 16 bit card, register map is reversed */
+ ei_status.reset_8390 = &mac8390_no_reset;
+ ei_status.block_input = &slow_sane_block_input;
+ ei_status.block_output = &slow_sane_block_output;
+ ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
+ ei_status.reg_offset = back4_offsets;
+ break;
- case ACCESS_32:
- /* 32 bit card, register map is reversed */
- ei_status.reset_8390 = &mac8390_no_reset;
- ei_status.block_input = &sane_block_input;
- ei_status.block_output = &sane_block_output;
- ei_status.get_8390_hdr = &sane_get_8390_hdr;
- ei_status.reg_offset = back4_offsets;
- access_bitmode = 1;
- break;
+ case ACCESS_32:
+ /* 32 bit card, register map is reversed */
+ ei_status.reset_8390 = &mac8390_no_reset;
+ ei_status.block_input = &sane_block_input;
+ ei_status.block_output = &sane_block_output;
+ ei_status.get_8390_hdr = &sane_get_8390_hdr;
+ ei_status.reg_offset = back4_offsets;
+ access_bitmode = 1;
+ break;
}
break;
@@ -608,24 +618,25 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
ei_status.block_input = &slow_sane_block_input;
ei_status.block_output = &slow_sane_block_output;
ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
- ei_status.reg_offset = fwrd4_offsets;
- break;
+ ei_status.reg_offset = fwrd4_offsets;
+ break;
default:
- printk(KERN_ERR "Card type %s is unsupported, sorry\n", ndev->board->name);
+ pr_err("Card type %s is unsupported, sorry\n",
+ ndev->board->name);
return -ENODEV;
}
__NS8390_init(dev, 0);
/* Good, done, now spit out some messages */
- printk(KERN_INFO "%s: %s in slot %X (type %s)\n",
- dev->name, ndev->board->name, ndev->board->slot, cardname[type]);
- printk(KERN_INFO
- "MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n",
- dev->dev_addr, dev->irq,
- (unsigned int)(dev->mem_end - dev->mem_start) >> 10,
- dev->mem_start, access_bitmode ? 32 : 16);
+ pr_info("%s: %s in slot %X (type %s)\n",
+ dev->name, ndev->board->name, ndev->board->slot,
+ cardname[type]);
+ pr_info("MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n",
+ dev->dev_addr, dev->irq,
+ (unsigned int)(dev->mem_end - dev->mem_start) >> 10,
+ dev->mem_start, access_bitmode ? 32 : 16);
return 0;
}
@@ -633,7 +644,7 @@ static int mac8390_open(struct net_device *dev)
{
__ei_open(dev);
if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) {
- printk ("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ pr_info("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
return -EAGAIN;
}
return 0;
@@ -650,72 +661,71 @@ static void mac8390_no_reset(struct net_device *dev)
{
ei_status.txing = 0;
if (ei_debug > 1)
- printk("reset not supported\n");
+ pr_info("reset not supported\n");
return;
}
static void interlan_reset(struct net_device *dev)
{
- unsigned char *target=nubus_slot_addr(IRQ2SLOT(dev->irq));
+ unsigned char *target = nubus_slot_addr(IRQ2SLOT(dev->irq));
if (ei_debug > 1)
- printk("Need to reset the NS8390 t=%lu...", jiffies);
+ pr_info("Need to reset the NS8390 t=%lu...", jiffies);
ei_status.txing = 0;
target[0xC0000] = 0;
if (ei_debug > 1)
- printk("reset complete\n");
+ pr_cont("reset complete\n");
return;
}
/* dayna_memcpy_fromio/dayna_memcpy_toio */
/* directly from daynaport.c by Alan Cox */
-static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from, int count)
+static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from,
+ int count)
{
volatile unsigned char *ptr;
- unsigned char *target=to;
- from<<=1; /* word, skip overhead */
- ptr=(unsigned char *)(dev->mem_start+from);
+ unsigned char *target = to;
+ from <<= 1; /* word, skip overhead */
+ ptr = (unsigned char *)(dev->mem_start+from);
/* Leading byte? */
- if (from&2) {
+ if (from & 2) {
*target++ = ptr[-1];
ptr += 2;
count--;
}
- while(count>=2)
- {
+ while (count >= 2) {
*(unsigned short *)target = *(unsigned short volatile *)ptr;
ptr += 4; /* skip cruft */
target += 2;
- count-=2;
+ count -= 2;
}
/* Trailing byte? */
- if(count)
+ if (count)
*target = *ptr;
}
-static void dayna_memcpy_tocard(struct net_device *dev, int to, const void *from, int count)
+static void dayna_memcpy_tocard(struct net_device *dev, int to,
+ const void *from, int count)
{
volatile unsigned short *ptr;
- const unsigned char *src=from;
- to<<=1; /* word, skip overhead */
- ptr=(unsigned short *)(dev->mem_start+to);
+ const unsigned char *src = from;
+ to <<= 1; /* word, skip overhead */
+ ptr = (unsigned short *)(dev->mem_start+to);
/* Leading byte? */
- if (to&2) { /* avoid a byte write (stomps on other data) */
+ if (to & 2) { /* avoid a byte write (stomps on other data) */
ptr[-1] = (ptr[-1]&0xFF00)|*src++;
ptr++;
count--;
}
- while(count>=2)
- {
- *ptr++=*(unsigned short *)src; /* Copy and */
+ while (count >= 2) {
+ *ptr++ = *(unsigned short *)src; /* Copy and */
ptr++; /* skip cruft */
src += 2;
- count-=2;
+ count -= 2;
}
/* Trailing byte? */
- if(count)
- {
+ if (count) {
/* card doesn't like byte writes */
- *ptr=(*ptr&0x00FF)|(*src << 8);
+ *ptr = (*ptr & 0x00FF) | (*src << 8);
}
}
@@ -738,11 +748,14 @@ static void sane_block_input(struct net_device *dev, int count,
if (xfer_start + count > ei_status.rmem_end) {
/* We must wrap the input move. */
int semi_count = ei_status.rmem_end - xfer_start;
- memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, semi_count);
+ memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
+ semi_count);
count -= semi_count;
- memcpy_toio(skb->data + semi_count, (char *)ei_status.rmem_start, count);
+ memcpy_toio(skb->data + semi_count,
+ (char *)ei_status.rmem_start, count);
} else {
- memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, count);
+ memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
+ count);
}
}
@@ -755,16 +768,18 @@ static void sane_block_output(struct net_device *dev, int count,
}
/* dayna block input/output */
-static void dayna_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+static void dayna_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page)
{
unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
- dayna_memcpy_fromcard(dev, (void *)hdr, hdr_start, 4);
+ dayna_memcpy_fromcard(dev, hdr, hdr_start, 4);
/* Fix endianness */
- hdr->count=(hdr->count&0xFF)<<8|(hdr->count>>8);
+ hdr->count = (hdr->count & 0xFF) << 8 | (hdr->count >> 8);
}
-static void dayna_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+static void dayna_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
{
unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
unsigned long xfer_start = xfer_base+dev->mem_start;
@@ -772,8 +787,7 @@ static void dayna_block_input(struct net_device *dev, int count, struct sk_buff
/* Note the offset math is done in card memory space which is word
per long onto our space. */
- if (xfer_start + count > ei_status.rmem_end)
- {
+ if (xfer_start + count > ei_status.rmem_end) {
/* We must wrap the input move. */
int semi_count = ei_status.rmem_end - xfer_start;
dayna_memcpy_fromcard(dev, skb->data, xfer_base, semi_count);
@@ -781,15 +795,14 @@ static void dayna_block_input(struct net_device *dev, int count, struct sk_buff
dayna_memcpy_fromcard(dev, skb->data + semi_count,
ei_status.rmem_start - dev->mem_start,
count);
- }
- else
- {
+ } else {
dayna_memcpy_fromcard(dev, skb->data, xfer_base, count);
}
}
-static void dayna_block_output(struct net_device *dev, int count, const unsigned char *buf,
- int start_page)
+static void dayna_block_output(struct net_device *dev, int count,
+ const unsigned char *buf,
+ int start_page)
{
long shmem = (start_page - WD_START_PG)<<8;
@@ -797,40 +810,39 @@ static void dayna_block_output(struct net_device *dev, int count, const unsigned
}
/* Cabletron block I/O */
-static void slow_sane_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page)
+static void slow_sane_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
{
unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
- word_memcpy_fromcard((void *)hdr, (char *)dev->mem_start+hdr_start, 4);
+ word_memcpy_fromcard(hdr, (char *)dev->mem_start + hdr_start, 4);
/* Register endianism - fix here rather than 8390.c */
hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8);
}
-static void slow_sane_block_input(struct net_device *dev, int count, struct sk_buff *skb,
- int ring_offset)
+static void slow_sane_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
{
unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
unsigned long xfer_start = xfer_base+dev->mem_start;
- if (xfer_start + count > ei_status.rmem_end)
- {
+ if (xfer_start + count > ei_status.rmem_end) {
/* We must wrap the input move. */
int semi_count = ei_status.rmem_end - xfer_start;
- word_memcpy_fromcard(skb->data, (char *)dev->mem_start +
- xfer_base, semi_count);
+ word_memcpy_fromcard(skb->data,
+ (char *)dev->mem_start + xfer_base,
+ semi_count);
count -= semi_count;
word_memcpy_fromcard(skb->data + semi_count,
(char *)ei_status.rmem_start, count);
- }
- else
- {
- word_memcpy_fromcard(skb->data, (char *)dev->mem_start +
- xfer_base, count);
+ } else {
+ word_memcpy_fromcard(skb->data,
+ (char *)dev->mem_start + xfer_base, count);
}
}
-static void slow_sane_block_output(struct net_device *dev, int count, const unsigned char *buf,
- int start_page)
+static void slow_sane_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
{
long shmem = (start_page - WD_START_PG)<<8;
@@ -843,10 +855,10 @@ static void word_memcpy_tocard(void *tp, const void *fp, int count)
const unsigned short *from = fp;
count++;
- count/=2;
+ count /= 2;
- while(count--)
- *to++=*from++;
+ while (count--)
+ *to++ = *from++;
}
static void word_memcpy_fromcard(void *tp, const void *fp, int count)
@@ -855,10 +867,10 @@ static void word_memcpy_fromcard(void *tp, const void *fp, int count)
const volatile unsigned short *from = fp;
count++;
- count/=2;
+ count /= 2;
- while(count--)
- *to++=*from++;
+ while (count--)
+ *to++ = *from++;
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 21a9c9ab4b34..40faa368b07a 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -39,31 +39,6 @@ struct macvlan_port {
struct list_head vlans;
};
-/**
- * struct macvlan_rx_stats - MACVLAN percpu rx stats
- * @rx_packets: number of received packets
- * @rx_bytes: number of received bytes
- * @multicast: number of received multicast packets
- * @rx_errors: number of errors
- */
-struct macvlan_rx_stats {
- unsigned long rx_packets;
- unsigned long rx_bytes;
- unsigned long multicast;
- unsigned long rx_errors;
-};
-
-struct macvlan_dev {
- struct net_device *dev;
- struct list_head list;
- struct hlist_node hlist;
- struct macvlan_port *port;
- struct net_device *lowerdev;
- struct macvlan_rx_stats *rx_stats;
- enum macvlan_mode mode;
-};
-
-
static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
const unsigned char *addr)
{
@@ -118,31 +93,17 @@ static int macvlan_addr_busy(const struct macvlan_port *port,
return 0;
}
-static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
- unsigned int len, bool success,
- bool multicast)
-{
- struct macvlan_rx_stats *rx_stats;
-
- rx_stats = per_cpu_ptr(vlan->rx_stats, smp_processor_id());
- if (likely(success)) {
- rx_stats->rx_packets++;;
- rx_stats->rx_bytes += len;
- if (multicast)
- rx_stats->multicast++;
- } else {
- rx_stats->rx_errors++;
- }
-}
-static int macvlan_broadcast_one(struct sk_buff *skb, struct net_device *dev,
+static int macvlan_broadcast_one(struct sk_buff *skb,
+ const struct macvlan_dev *vlan,
const struct ethhdr *eth, bool local)
{
+ struct net_device *dev = vlan->dev;
if (!skb)
return NET_RX_DROP;
if (local)
- return dev_forward_skb(dev, skb);
+ return vlan->forward(dev, skb);
skb->dev = dev;
if (!compare_ether_addr_64bits(eth->h_dest,
@@ -151,7 +112,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb, struct net_device *dev,
else
skb->pkt_type = PACKET_MULTICAST;
- return netif_rx(skb);
+ return vlan->receive(skb);
}
static void macvlan_broadcast(struct sk_buff *skb,
@@ -175,7 +136,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
continue;
nskb = skb_clone(skb, GFP_ATOMIC);
- err = macvlan_broadcast_one(nskb, vlan->dev, eth,
+ err = macvlan_broadcast_one(nskb, vlan, eth,
mode == MACVLAN_MODE_BRIDGE);
macvlan_count_rx(vlan, skb->len + ETH_HLEN,
err == NET_RX_SUCCESS, 1);
@@ -238,7 +199,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
skb->dev = dev;
skb->pkt_type = PACKET_HOST;
- netif_rx(skb);
+ vlan->receive(skb);
return NULL;
}
@@ -260,7 +221,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
dest = macvlan_hash_lookup(port, eth->h_dest);
if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
unsigned int length = skb->len + ETH_HLEN;
- int ret = dev_forward_skb(dest->dev, skb);
+ int ret = dest->forward(dest->dev, skb);
macvlan_count_rx(dest, length,
ret == NET_RX_SUCCESS, 0);
@@ -269,12 +230,12 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
}
xmit_world:
- skb->dev = vlan->lowerdev;
+ skb_set_dev(skb, vlan->lowerdev);
return dev_queue_xmit(skb);
}
-static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
+netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
int i = skb_get_queue_mapping(skb);
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
@@ -290,6 +251,7 @@ static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
return ret;
}
+EXPORT_SYMBOL_GPL(macvlan_start_xmit);
static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
@@ -418,7 +380,7 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
#define MACVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
- NETIF_F_TSO_ECN | NETIF_F_TSO6)
+ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO)
#define MACVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
@@ -623,8 +585,11 @@ static int macvlan_get_tx_queues(struct net *net,
return 0;
}
-static int macvlan_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ int (*receive)(struct sk_buff *skb),
+ int (*forward)(struct net_device *dev,
+ struct sk_buff *skb))
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct macvlan_port *port;
@@ -664,6 +629,8 @@ static int macvlan_newlink(struct net *src_net, struct net_device *dev,
vlan->lowerdev = lowerdev;
vlan->dev = dev;
vlan->port = port;
+ vlan->receive = receive;
+ vlan->forward = forward;
vlan->mode = MACVLAN_MODE_VEPA;
if (data && data[IFLA_MACVLAN_MODE])
@@ -677,8 +644,17 @@ static int macvlan_newlink(struct net *src_net, struct net_device *dev,
netif_stacked_transfer_operstate(lowerdev, dev);
return 0;
}
+EXPORT_SYMBOL_GPL(macvlan_common_newlink);
-static void macvlan_dellink(struct net_device *dev, struct list_head *head)
+static int macvlan_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ return macvlan_common_newlink(src_net, dev, tb, data,
+ netif_rx,
+ dev_forward_skb);
+}
+
+void macvlan_dellink(struct net_device *dev, struct list_head *head)
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct macvlan_port *port = vlan->port;
@@ -689,6 +665,7 @@ static void macvlan_dellink(struct net_device *dev, struct list_head *head)
if (list_empty(&port->vlans))
macvlan_port_destroy(port->dev);
}
+EXPORT_SYMBOL_GPL(macvlan_dellink);
static int macvlan_changelink(struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
@@ -720,19 +697,27 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
[IFLA_MACVLAN_MODE] = { .type = NLA_U32 },
};
-static struct rtnl_link_ops macvlan_link_ops __read_mostly = {
+int macvlan_link_register(struct rtnl_link_ops *ops)
+{
+ /* common fields */
+ ops->priv_size = sizeof(struct macvlan_dev);
+ ops->get_tx_queues = macvlan_get_tx_queues;
+ ops->setup = macvlan_setup;
+ ops->validate = macvlan_validate;
+ ops->maxtype = IFLA_MACVLAN_MAX;
+ ops->policy = macvlan_policy;
+ ops->changelink = macvlan_changelink;
+ ops->get_size = macvlan_get_size;
+ ops->fill_info = macvlan_fill_info;
+
+ return rtnl_link_register(ops);
+};
+EXPORT_SYMBOL_GPL(macvlan_link_register);
+
+static struct rtnl_link_ops macvlan_link_ops = {
.kind = "macvlan",
- .priv_size = sizeof(struct macvlan_dev),
- .get_tx_queues = macvlan_get_tx_queues,
- .setup = macvlan_setup,
- .validate = macvlan_validate,
.newlink = macvlan_newlink,
.dellink = macvlan_dellink,
- .maxtype = IFLA_MACVLAN_MAX,
- .policy = macvlan_policy,
- .changelink = macvlan_changelink,
- .get_size = macvlan_get_size,
- .fill_info = macvlan_fill_info,
};
static int macvlan_device_event(struct notifier_block *unused,
@@ -761,7 +746,7 @@ static int macvlan_device_event(struct notifier_block *unused,
break;
case NETDEV_UNREGISTER:
list_for_each_entry_safe(vlan, next, &port->vlans, list)
- macvlan_dellink(vlan->dev, NULL);
+ vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL);
break;
}
return NOTIFY_DONE;
@@ -778,7 +763,7 @@ static int __init macvlan_init_module(void)
register_netdevice_notifier(&macvlan_notifier_block);
macvlan_handle_frame_hook = macvlan_handle_frame;
- err = rtnl_link_register(&macvlan_link_ops);
+ err = macvlan_link_register(&macvlan_link_ops);
if (err < 0)
goto err1;
return 0;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
new file mode 100644
index 000000000000..ad1f6ef89308
--- /dev/null
+++ b/drivers/net/macvtap.c
@@ -0,0 +1,581 @@
+#include <linux/etherdevice.h>
+#include <linux/if_macvlan.h>
+#include <linux/interrupt.h>
+#include <linux/nsproxy.h>
+#include <linux/compat.h>
+#include <linux/if_tun.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/cache.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+
+#include <net/net_namespace.h>
+#include <net/rtnetlink.h>
+#include <net/sock.h>
+
+/*
+ * A macvtap queue is the central object of this driver; it connects
+ * an open character device to a macvlan interface. There can be
+ * multiple queues on one interface, which map back to queues
+ * implemented in hardware on the underlying device.
+ *
+ * macvtap_proto is used to allocate queues through the sock allocation
+ * mechanism.
+ *
+ * TODO: multiqueue support is currently not implemented, even though
+ * macvtap is basically prepared for that. We will need to add this
+ * here as well as in virtio-net and qemu to get line rate on 10gbit
+ * adapters from a guest.
+ */
+struct macvtap_queue {
+ struct sock sk;
+ struct socket sock;
+ struct macvlan_dev *vlan;
+ struct file *file;
+};
+
+static struct proto macvtap_proto = {
+ .name = "macvtap",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof (struct macvtap_queue),
+};
+
+/*
+ * Minor number matches netdev->ifindex, so we need a potentially
+ * large value. This also makes it possible to split the
+ * tap functionality out again in the future by offering it
+ * from other drivers besides macvtap. As long as every device
+ * only has one tap, the interface numbers assure that the
+ * device nodes are unique.
+ */
+static unsigned int macvtap_major;
+#define MACVTAP_NUM_DEVS 65536
+static struct class *macvtap_class;
+static struct cdev macvtap_cdev;
+
+/*
+ * RCU usage:
+ * The macvtap_queue is referenced both from the chardev struct file
+ * and from the struct macvlan_dev using rcu_read_lock.
+ *
+ * We never actually update the contents of a macvtap_queue atomically
+ * with RCU but it is used for race-free destruction of a queue when
+ * either the file or the macvlan_dev goes away. Pointers back to
+ * the dev and the file are implicitly valid as long as the queue
+ * exists.
+ *
+ * The callbacks from macvlan are always done with rcu_read_lock held
+ * already, while in the file_operations, we get it ourselves.
+ *
+ * When destroying a queue, we remove the pointers from the file and
+ * from the dev and then synchronize_rcu to make sure no thread is
+ * still using the queue. There may still be references to the struct
+ * sock inside of the queue from outbound SKBs, but these never
+ * reference back to the file or the dev. The data structure is freed
+ * through __sk_free when both our references and any pending SKBs
+ * are gone.
+ *
+ * macvtap_lock is only used to prevent multiple concurrent open()
+ * calls to assign a new vlan->tap pointer. It could be moved into
+ * the macvlan_dev itself but is extremely rarely used.
+ */
+static DEFINE_SPINLOCK(macvtap_lock);
+
+/*
+ * Choose the next free queue, for now there is only one
+ */
+static int macvtap_set_queue(struct net_device *dev, struct file *file,
+ struct macvtap_queue *q)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ int err = -EBUSY;
+
+ spin_lock(&macvtap_lock);
+ if (rcu_dereference(vlan->tap))
+ goto out;
+
+ err = 0;
+ q->vlan = vlan;
+ rcu_assign_pointer(vlan->tap, q);
+
+ q->file = file;
+ rcu_assign_pointer(file->private_data, q);
+
+out:
+ spin_unlock(&macvtap_lock);
+ return err;
+}
+
+/*
+ * We must destroy each queue exactly once, when either
+ * the netdev or the file goes away.
+ *
+ * Using the spinlock makes sure that we don't get
+ * to the queue again after destroying it.
+ *
+ * synchronize_rcu serializes with the packet flow
+ * that uses rcu_read_lock.
+ */
+static void macvtap_del_queue(struct macvtap_queue **qp)
+{
+ struct macvtap_queue *q;
+
+ spin_lock(&macvtap_lock);
+ q = rcu_dereference(*qp);
+ if (!q) {
+ spin_unlock(&macvtap_lock);
+ return;
+ }
+
+ rcu_assign_pointer(q->vlan->tap, NULL);
+ rcu_assign_pointer(q->file->private_data, NULL);
+ spin_unlock(&macvtap_lock);
+
+ synchronize_rcu();
+ sock_put(&q->sk);
+}
+
+/*
+ * Since we only support one queue, just dereference the pointer.
+ */
+static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+
+ return rcu_dereference(vlan->tap);
+}
+
+static void macvtap_del_queues(struct net_device *dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ macvtap_del_queue(&vlan->tap);
+}
+
+static inline struct macvtap_queue *macvtap_file_get_queue(struct file *file)
+{
+ rcu_read_lock_bh();
+ return rcu_dereference(file->private_data);
+}
+
+static inline void macvtap_file_put_queue(void)
+{
+ rcu_read_unlock_bh();
+}
+
+/*
+ * Forward happens for data that gets sent from one macvlan
+ * endpoint to another one in bridge mode. We just take
+ * the skb and put it into the receive queue.
+ */
+static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
+{
+ struct macvtap_queue *q = macvtap_get_queue(dev, skb);
+ if (!q)
+ return -ENOLINK;
+
+ skb_queue_tail(&q->sk.sk_receive_queue, skb);
+ wake_up(q->sk.sk_sleep);
+ return 0;
+}
+
+/*
+ * Receive is for data from the external interface (lowerdev);
+ * in the case of macvtap, we can treat it the same way as
+ * forward, which macvlan cannot.
+ */
+static int macvtap_receive(struct sk_buff *skb)
+{
+ skb_push(skb, ETH_HLEN);
+ return macvtap_forward(skb->dev, skb);
+}
+
+static int macvtap_newlink(struct net *src_net,
+ struct net_device *dev,
+ struct nlattr *tb[],
+ struct nlattr *data[])
+{
+ struct device *classdev;
+ dev_t devt;
+ int err;
+
+ err = macvlan_common_newlink(src_net, dev, tb, data,
+ macvtap_receive, macvtap_forward);
+ if (err)
+ goto out;
+
+ devt = MKDEV(MAJOR(macvtap_major), dev->ifindex);
+
+ classdev = device_create(macvtap_class, &dev->dev, devt,
+ dev, "tap%d", dev->ifindex);
+ if (IS_ERR(classdev)) {
+ err = PTR_ERR(classdev);
+ macvtap_del_queues(dev);
+ }
+
+out:
+ return err;
+}
+
+static void macvtap_dellink(struct net_device *dev,
+ struct list_head *head)
+{
+ device_destroy(macvtap_class,
+ MKDEV(MAJOR(macvtap_major), dev->ifindex));
+
+ macvtap_del_queues(dev);
+ macvlan_dellink(dev, head);
+}
+
+static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
+ .kind = "macvtap",
+ .newlink = macvtap_newlink,
+ .dellink = macvtap_dellink,
+};
+
+
+static void macvtap_sock_write_space(struct sock *sk)
+{
+ if (!sock_writeable(sk) ||
+ !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
+ return;
+
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_sync(sk->sk_sleep);
+}
+
+static int macvtap_open(struct inode *inode, struct file *file)
+{
+ struct net *net = current->nsproxy->net_ns;
+ struct net_device *dev = dev_get_by_index(net, iminor(inode));
+ struct macvtap_queue *q;
+ int err;
+
+ err = -ENODEV;
+ if (!dev)
+ goto out;
+
+ /* check if this is a macvtap device */
+ err = -EINVAL;
+ if (dev->rtnl_link_ops != &macvtap_link_ops)
+ goto out;
+
+ err = -ENOMEM;
+ q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
+ &macvtap_proto);
+ if (!q)
+ goto out;
+
+ init_waitqueue_head(&q->sock.wait);
+ q->sock.type = SOCK_RAW;
+ q->sock.state = SS_CONNECTED;
+ sock_init_data(&q->sock, &q->sk);
+ q->sk.sk_allocation = GFP_ATOMIC; /* for now */
+ q->sk.sk_write_space = macvtap_sock_write_space;
+
+ err = macvtap_set_queue(dev, file, q);
+ if (err)
+ sock_put(&q->sk);
+
+out:
+ if (dev)
+ dev_put(dev);
+
+ return err;
+}
+
+static int macvtap_release(struct inode *inode, struct file *file)
+{
+ macvtap_del_queue((struct macvtap_queue **)&file->private_data);
+ return 0;
+}
+
+static unsigned int macvtap_poll(struct file *file, poll_table * wait)
+{
+ struct macvtap_queue *q = macvtap_file_get_queue(file);
+ unsigned int mask = POLLERR;
+
+ if (!q)
+ goto out;
+
+ mask = 0;
+ poll_wait(file, &q->sock.wait, wait);
+
+ if (!skb_queue_empty(&q->sk.sk_receive_queue))
+ mask |= POLLIN | POLLRDNORM;
+
+ if (sock_writeable(&q->sk) ||
+ (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
+ sock_writeable(&q->sk)))
+ mask |= POLLOUT | POLLWRNORM;
+
+out:
+ macvtap_file_put_queue();
+ return mask;
+}
+
+/* Get packet from user space buffer */
+static ssize_t macvtap_get_user(struct macvtap_queue *q,
+ const struct iovec *iv, size_t count,
+ int noblock)
+{
+ struct sk_buff *skb;
+ size_t len = count;
+ int err;
+
+ if (unlikely(len < ETH_HLEN))
+ return -EINVAL;
+
+ skb = sock_alloc_send_skb(&q->sk, NET_IP_ALIGN + len, noblock, &err);
+
+ if (!skb) {
+ macvlan_count_rx(q->vlan, 0, false, false);
+ return err;
+ }
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb_put(skb, count);
+
+ if (skb_copy_datagram_from_iovec(skb, 0, iv, 0, len)) {
+ macvlan_count_rx(q->vlan, 0, false, false);
+ kfree_skb(skb);
+ return -EFAULT;
+ }
+
+ skb_set_network_header(skb, ETH_HLEN);
+
+ macvlan_start_xmit(skb, q->vlan->dev);
+
+ return count;
+}
+
+static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
+ unsigned long count, loff_t pos)
+{
+ struct file *file = iocb->ki_filp;
+ ssize_t result = -ENOLINK;
+ struct macvtap_queue *q = macvtap_file_get_queue(file);
+
+ if (!q)
+ goto out;
+
+ result = macvtap_get_user(q, iv, iov_length(iv, count),
+ file->f_flags & O_NONBLOCK);
+out:
+ macvtap_file_put_queue();
+ return result;
+}
+
+/* Put packet to the user space buffer */
+static ssize_t macvtap_put_user(struct macvtap_queue *q,
+ const struct sk_buff *skb,
+ const struct iovec *iv, int len)
+{
+ struct macvlan_dev *vlan = q->vlan;
+ int ret;
+
+ len = min_t(int, skb->len, len);
+
+ ret = skb_copy_datagram_const_iovec(skb, 0, iv, 0, len);
+
+ macvlan_count_rx(vlan, len, ret == 0, 0);
+
+ return ret ? ret : len;
+}
+
+static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
+ unsigned long count, loff_t pos)
+{
+ struct file *file = iocb->ki_filp;
+ struct macvtap_queue *q = macvtap_file_get_queue(file);
+
+ DECLARE_WAITQUEUE(wait, current);
+ struct sk_buff *skb;
+ ssize_t len, ret = 0;
+
+ if (!q) {
+ ret = -ENOLINK;
+ goto out;
+ }
+
+ len = iov_length(iv, count);
+ if (len < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ add_wait_queue(q->sk.sk_sleep, &wait);
+ while (len) {
+ current->state = TASK_INTERRUPTIBLE;
+
+ /* Read frames from the queue */
+ skb = skb_dequeue(&q->sk.sk_receive_queue);
+ if (!skb) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ /* Nothing to read, let's sleep */
+ schedule();
+ continue;
+ }
+ ret = macvtap_put_user(q, skb, iv, len);
+ kfree_skb(skb);
+ break;
+ }
+
+ current->state = TASK_RUNNING;
+ remove_wait_queue(q->sk.sk_sleep, &wait);
+
+out:
+ macvtap_file_put_queue();
+ return ret;
+}
+
+/*
+ * provide compatibility with generic tun/tap interface
+ */
+static long macvtap_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct macvtap_queue *q;
+ void __user *argp = (void __user *)arg;
+ struct ifreq __user *ifr = argp;
+ unsigned int __user *up = argp;
+ unsigned int u;
+ char devname[IFNAMSIZ];
+
+ switch (cmd) {
+ case TUNSETIFF:
+ /* ignore the name, just look at flags */
+ if (get_user(u, &ifr->ifr_flags))
+ return -EFAULT;
+ if (u != (IFF_TAP | IFF_NO_PI))
+ return -EINVAL;
+ return 0;
+
+ case TUNGETIFF:
+ q = macvtap_file_get_queue(file);
+ if (!q)
+ return -ENOLINK;
+ memcpy(devname, q->vlan->dev->name, sizeof(devname));
+ macvtap_file_put_queue();
+
+ if (copy_to_user(&ifr->ifr_name, devname, IFNAMSIZ) ||
+ put_user((TUN_TAP_DEV | TUN_NO_PI), &ifr->ifr_flags))
+ return -EFAULT;
+ return 0;
+
+ case TUNGETFEATURES:
+ if (put_user((IFF_TAP | IFF_NO_PI), up))
+ return -EFAULT;
+ return 0;
+
+ case TUNSETSNDBUF:
+ if (get_user(u, up))
+ return -EFAULT;
+
+ q = macvtap_file_get_queue(file);
+ if (!q) {
+ macvtap_file_put_queue();
+ return -ENOLINK;
+ }
+ q->sk.sk_sndbuf = u;
+ macvtap_file_put_queue();
+ return 0;
+
+ case TUNSETOFFLOAD:
+ /* let the user check for future flags */
+ if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
+ TUN_F_TSO_ECN | TUN_F_UFO))
+ return -EINVAL;
+
+ /* TODO: add support for these, so far we don't
+ support any offload */
+ if (arg & (TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
+ TUN_F_TSO_ECN | TUN_F_UFO))
+ return -EINVAL;
+
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+#ifdef CONFIG_COMPAT
+static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static const struct file_operations macvtap_fops = {
+ .owner = THIS_MODULE,
+ .open = macvtap_open,
+ .release = macvtap_release,
+ .aio_read = macvtap_aio_read,
+ .aio_write = macvtap_aio_write,
+ .poll = macvtap_poll,
+ .llseek = no_llseek,
+ .unlocked_ioctl = macvtap_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = macvtap_compat_ioctl,
+#endif
+};
+
+static int macvtap_init(void)
+{
+ int err;
+
+ err = alloc_chrdev_region(&macvtap_major, 0,
+ MACVTAP_NUM_DEVS, "macvtap");
+ if (err)
+ goto out1;
+
+ cdev_init(&macvtap_cdev, &macvtap_fops);
+ err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
+ if (err)
+ goto out2;
+
+ macvtap_class = class_create(THIS_MODULE, "macvtap");
+ if (IS_ERR(macvtap_class)) {
+ err = PTR_ERR(macvtap_class);
+ goto out3;
+ }
+
+ err = macvlan_link_register(&macvtap_link_ops);
+ if (err)
+ goto out4;
+
+ return 0;
+
+out4:
+ class_unregister(macvtap_class);
+out3:
+ cdev_del(&macvtap_cdev);
+out2:
+ unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
+out1:
+ return err;
+}
+module_init(macvtap_init);
+
+static void macvtap_exit(void)
+{
+ rtnl_link_unregister(&macvtap_link_ops);
+ class_unregister(macvtap_class);
+ cdev_del(&macvtap_cdev);
+ unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
+}
+module_exit(macvtap_exit);
+
+MODULE_ALIAS_RTNL_LINK("macvtap");
+MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 2af81735386b..9f72cb45f4af 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -51,14 +51,11 @@
static const char *meth_str="SGI O2 Fast Ethernet";
-#define HAVE_TX_TIMEOUT
/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */
#define TX_TIMEOUT (400*HZ/1000)
-#ifdef HAVE_TX_TIMEOUT
static int timeout = TX_TIMEOUT;
module_param(timeout, int, 0);
-#endif
/*
* This structure is private to each device. It is used to pass
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 291a505fd4fc..8f6e816a7395 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1174,7 +1174,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_port:
- for (port = 1; port <= dev->caps.num_ports; port++)
+ for (--port; port >= 1; --port)
mlx4_cleanup_port_info(&priv->port[port]);
mlx4_cleanup_mcg_table(dev);
@@ -1271,7 +1271,7 @@ int mlx4_restart_one(struct pci_dev *pdev)
return __mlx4_init_one(pdev, NULL);
}
-static struct pci_device_id mlx4_pci_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
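
The err_port change above makes the unwind start from the last successfully initialised port instead of re-walking the full range. A standalone sketch of that pattern, with init_port()/cleanup_port() as hypothetical stand-ins:

/* Illustrative only; not the mlx4 code itself. */
static int init_port(int port) { return 0; }
static void cleanup_port(int port) { }

static int init_all_ports(int num_ports)
{
	int port, err = 0;

	for (port = 1; port <= num_ports; port++) {
		err = init_port(port);
		if (err)
			goto err_port;
	}
	return 0;

err_port:
	/* tear down only ports 1 .. port-1, in reverse order */
	for (--port; port >= 1; --port)
		cleanup_port(port);
	return err;
}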
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 1405a170bb43..e24072a9a979 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -55,7 +55,6 @@
#include <linux/types.h>
#include <linux/inet_lro.h>
#include <asm/system.h>
-#include <linux/list.h>
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";
@@ -656,6 +655,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
struct sk_buff *skb;
int rx;
struct rx_desc *rx_desc;
+ int size;
skb = __skb_dequeue(&mp->rx_recycle);
if (skb == NULL)
@@ -678,10 +678,11 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
rx_desc = rxq->rx_desc_area + rx;
+ size = skb->end - skb->data;
rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
- skb->data, mp->skb_size,
+ skb->data, size,
DMA_FROM_DEVICE);
- rx_desc->buf_size = mp->skb_size;
+ rx_desc->buf_size = size;
rxq->rx_skb[rx] = skb;
wmb();
rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
@@ -1695,7 +1696,7 @@ static u32 uc_addr_filter_mask(struct net_device *dev)
return 0;
nibbles = 1 << (dev->dev_addr[5] & 0x0f);
- list_for_each_entry(ha, &dev->uc.list, list) {
+ netdev_for_each_uc_addr(ha, dev) {
if (memcmp(dev->dev_addr, ha->addr, 5))
return 0;
if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
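
The uc_addr_filter_mask() hunk above replaces the open-coded walk of dev->uc.list with the netdev_for_each_uc_addr() helper. A self-contained sketch of the helper's use; uc_list_contains() is a hypothetical example function, not driver code:

#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/string.h>

/* returns 1 if addr is in the device's unicast address list */
static int uc_list_contains(struct net_device *dev, const u8 *addr)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_uc_addr(ha, dev) {
		if (!memcmp(ha->addr, addr, ETH_ALEN))
			return 1;
	}
	return 0;
}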
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 3fcb1c356e0d..c0884a9cba3c 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -4085,7 +4085,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009
-static struct pci_device_id myri10ge_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(myri10ge_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
{PCI_DEVICE
(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 797fe164ce27..2d7b3bbfed01 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -247,7 +247,7 @@ static struct {
{ "NatSemi DP8381[56]", 0, 24 },
};
-static struct pci_device_id natsemi_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(natsemi_pci_tbl) = {
{ PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 },
{ PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ } /* terminate list */
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 3fcebb70151c..85aec4f10131 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -136,7 +136,7 @@ static struct {
};
-static struct pci_device_id ne2k_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ne2k_pci_tbl) = {
{ 0x10ec, 0x8029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_RealTek_RTL_8029 },
{ 0x1050, 0x0940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940 },
{ 0x11f6, 0x1401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Compex_RL2000 },
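
Several tables in this series are converted from a plain struct pci_device_id array to DEFINE_PCI_DEVICE_TABLE(). A minimal sketch of the resulting form, using placeholder vendor/device IDs:

#include <linux/pci.h>
#include <linux/module.h>

/* hypothetical IDs; the macro declares the table const/__devinitconst */
static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	{ PCI_DEVICE(0x1234, 0x5678) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);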
diff --git a/drivers/net/netxen/Makefile b/drivers/net/netxen/Makefile
index 11d94e2434e4..861a0590b1f4 100644
--- a/drivers/net/netxen/Makefile
+++ b/drivers/net/netxen/Makefile
@@ -18,7 +18,7 @@
# MA 02111-1307, USA.
#
# The full GNU General Public License is included in this distribution
-# in the file called LICENSE.
+# in the file called "COPYING".
#
#
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index c1223c92d566..144d2e880422 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
@@ -53,8 +53,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 65
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.65"
+#define _NETXEN_NIC_LINUX_SUBVERSION 72
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.72"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
@@ -1427,8 +1427,8 @@ static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
}
-int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac);
-int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac);
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac);
+int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac);
extern void netxen_change_ringparam(struct netxen_adapter *adapter);
extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
int *valp);
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 9cb8f6878047..2a8ef5fc9663 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index ddd704ae0188..f8499e56cbee 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
@@ -66,7 +66,7 @@ static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = {
#define NETXEN_NIC_TEST_LEN ARRAY_SIZE(netxen_nic_gstrings_test)
-#define NETXEN_NIC_REGS_COUNT 42
+#define NETXEN_NIC_REGS_COUNT 30
#define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32))
#define NETXEN_MAX_EEPROM_LEN 1024
@@ -312,150 +312,91 @@ static int netxen_nic_get_regs_len(struct net_device *dev)
return NETXEN_NIC_REGS_LEN;
}
-struct netxen_niu_regs {
- __u32 reg[NETXEN_NIC_REGS_COUNT];
-};
-
-static struct netxen_niu_regs niu_registers[] = {
- {
- /* GB Mode */
- {
- NETXEN_NIU_GB_SERDES_RESET,
- NETXEN_NIU_GB0_MII_MODE,
- NETXEN_NIU_GB1_MII_MODE,
- NETXEN_NIU_GB2_MII_MODE,
- NETXEN_NIU_GB3_MII_MODE,
- NETXEN_NIU_GB0_GMII_MODE,
- NETXEN_NIU_GB1_GMII_MODE,
- NETXEN_NIU_GB2_GMII_MODE,
- NETXEN_NIU_GB3_GMII_MODE,
- NETXEN_NIU_REMOTE_LOOPBACK,
- NETXEN_NIU_GB0_HALF_DUPLEX,
- NETXEN_NIU_GB1_HALF_DUPLEX,
- NETXEN_NIU_RESET_SYS_FIFOS,
- NETXEN_NIU_GB_CRC_DROP,
- NETXEN_NIU_GB_DROP_WRONGADDR,
- NETXEN_NIU_TEST_MUX_CTL,
-
- NETXEN_NIU_GB_MAC_CONFIG_0(0),
- NETXEN_NIU_GB_MAC_CONFIG_1(0),
- NETXEN_NIU_GB_HALF_DUPLEX_CTRL(0),
- NETXEN_NIU_GB_MAX_FRAME_SIZE(0),
- NETXEN_NIU_GB_TEST_REG(0),
- NETXEN_NIU_GB_MII_MGMT_CONFIG(0),
- NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
- NETXEN_NIU_GB_MII_MGMT_ADDR(0),
- NETXEN_NIU_GB_MII_MGMT_CTRL(0),
- NETXEN_NIU_GB_MII_MGMT_STATUS(0),
- NETXEN_NIU_GB_MII_MGMT_INDICATE(0),
- NETXEN_NIU_GB_INTERFACE_CTRL(0),
- NETXEN_NIU_GB_INTERFACE_STATUS(0),
- NETXEN_NIU_GB_STATION_ADDR_0(0),
- NETXEN_NIU_GB_STATION_ADDR_1(0),
- -1,
- }
- },
- {
- /* XG Mode */
- {
- NETXEN_NIU_XG_SINGLE_TERM,
- NETXEN_NIU_XG_DRIVE_HI,
- NETXEN_NIU_XG_DRIVE_LO,
- NETXEN_NIU_XG_DTX,
- NETXEN_NIU_XG_DEQ,
- NETXEN_NIU_XG_WORD_ALIGN,
- NETXEN_NIU_XG_RESET,
- NETXEN_NIU_XG_POWER_DOWN,
- NETXEN_NIU_XG_RESET_PLL,
- NETXEN_NIU_XG_SERDES_LOOPBACK,
- NETXEN_NIU_XG_DO_BYTE_ALIGN,
- NETXEN_NIU_XG_TX_ENABLE,
- NETXEN_NIU_XG_RX_ENABLE,
- NETXEN_NIU_XG_STATUS,
- NETXEN_NIU_XG_PAUSE_THRESHOLD,
- NETXEN_NIU_XGE_CONFIG_0,
- NETXEN_NIU_XGE_CONFIG_1,
- NETXEN_NIU_XGE_IPG,
- NETXEN_NIU_XGE_STATION_ADDR_0_HI,
- NETXEN_NIU_XGE_STATION_ADDR_0_1,
- NETXEN_NIU_XGE_STATION_ADDR_1_LO,
- NETXEN_NIU_XGE_STATUS,
- NETXEN_NIU_XGE_MAX_FRAME_SIZE,
- NETXEN_NIU_XGE_PAUSE_FRAME_VALUE,
- NETXEN_NIU_XGE_TX_BYTE_CNT,
- NETXEN_NIU_XGE_TX_FRAME_CNT,
- NETXEN_NIU_XGE_RX_BYTE_CNT,
- NETXEN_NIU_XGE_RX_FRAME_CNT,
- NETXEN_NIU_XGE_AGGR_ERROR_CNT,
- NETXEN_NIU_XGE_MULTICAST_FRAME_CNT,
- NETXEN_NIU_XGE_UNICAST_FRAME_CNT,
- NETXEN_NIU_XGE_CRC_ERROR_CNT,
- NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR,
- NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR,
- NETXEN_NIU_XGE_LOCAL_ERROR_CNT,
- NETXEN_NIU_XGE_REMOTE_ERROR_CNT,
- NETXEN_NIU_XGE_CONTROL_CHAR_CNT,
- NETXEN_NIU_XGE_PAUSE_FRAME_CNT,
- -1,
- }
- }
-};
-
static void
netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
struct netxen_adapter *adapter = netdev_priv(dev);
- __u32 mode, *regs_buff = p;
- int i, window;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct nx_host_sds_ring *sds_ring;
+ u32 *regs_buff = p;
+ int ring, i = 0;
+ int port = adapter->physical_port;
memset(p, 0, NETXEN_NIC_REGS_LEN);
+
regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
(adapter->pdev)->device;
- /* which mode */
- regs_buff[0] = NXRD32(adapter, NETXEN_NIU_MODE);
- mode = regs_buff[0];
-
- /* Common registers to all the modes */
- regs_buff[2] = NXRD32(adapter, NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER);
- /* GB/XGB Mode */
- mode = (mode / 2) - 1;
- window = 0;
- if (mode <= 1) {
- for (i = 3; niu_registers[mode].reg[i - 3] != -1; i++) {
- /* GB: port specific registers */
- if (mode == 0 && i >= 19)
- window = adapter->physical_port *
- NETXEN_NIC_PORT_WINDOW;
-
- regs_buff[i] = NXRD32(adapter,
- niu_registers[mode].reg[i - 3] + window);
- }
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
+ regs_buff[i++] = NXRD32(adapter, CRB_CMDPEG_STATE);
+ regs_buff[i++] = NXRD32(adapter, CRB_RCVPEG_STATE);
+ regs_buff[i++] = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
+ regs_buff[i++] = NXRDIO(adapter, adapter->crb_int_state_reg);
+ regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_STATE);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS2);
+
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_0+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_1+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_2+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_3+0x3c);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_4+0x3c);
+ i += 2;
+
+ regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE_P3);
+ regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
+
+ } else {
+ i++;
+
+ regs_buff[i++] = NXRD32(adapter,
+ NETXEN_NIU_XGE_CONFIG_0+(0x10000*port));
+ regs_buff[i++] = NXRD32(adapter,
+ NETXEN_NIU_XGE_CONFIG_1+(0x10000*port));
+
+ regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE);
+ regs_buff[i++] = NXRDIO(adapter,
+ adapter->tx_ring->crb_cmd_consumer);
+ }
+
+ regs_buff[i++] = NXRDIO(adapter, adapter->tx_ring->crb_cmd_producer);
+
+ regs_buff[i++] = NXRDIO(adapter,
+ recv_ctx->rds_rings[0].crb_rcv_producer);
+ regs_buff[i++] = NXRDIO(adapter,
+ recv_ctx->rds_rings[1].crb_rcv_producer);
+
+ regs_buff[i++] = adapter->max_sds_rings;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &(recv_ctx->sds_rings[ring]);
+ regs_buff[i++] = NXRDIO(adapter,
+ sds_ring->crb_sts_consumer);
}
}
static u32 netxen_nic_test_link(struct net_device *dev)
{
struct netxen_adapter *adapter = netdev_priv(dev);
- __u32 status;
- int val;
+ u32 val, port;
- /* read which mode */
- if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
- if (adapter->phy_read &&
- adapter->phy_read(adapter,
- NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
- &status) != 0)
- return -EIO;
- else {
- val = netxen_get_phy_link(status);
- return !val;
- }
- } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ port = adapter->physical_port;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ val = NXRD32(adapter, CRB_XG_STATE_P3);
+ val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
+ return (val == XG_LINK_UP_P3) ? 0 : 1;
+ } else {
val = NXRD32(adapter, CRB_XG_STATE);
+ val = (val >> port*8) & 0xff;
return (val == XG_LINK_UP) ? 0 : 1;
}
- return -EIO;
}
static int
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 638369024908..622e4c8be937 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 2e364fee3cbb..dd45c7a9122e 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
@@ -345,8 +345,7 @@ netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg)
void
netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem)
{
- int val;
- val = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
+ NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
}
int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port)
@@ -691,6 +690,9 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
struct list_head *head;
nx_mac_list_t *cur;
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
list_splice_tail_init(&adapter->mac_list, &del_list);
nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list);
@@ -775,17 +777,20 @@ int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
{
nx_nic_req_t req;
- u64 word;
- int rv;
+ u64 word[6];
+ int rv, i;
memset(&req, 0, sizeof(nx_nic_req_t));
+ memset(word, 0, sizeof(word));
req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
- word = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
- req.req_hdr = cpu_to_le64(word);
+ word[0] = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word[0]);
- memcpy(&req.words[0], &adapter->coal, sizeof(adapter->coal));
+ memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
+ for (i = 0; i < 6; i++)
+ req.words[i] = cpu_to_le64(word[i]);
rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
if (rv != 0) {
@@ -1031,7 +1036,7 @@ static int netxen_get_flash_block(struct netxen_adapter *adapter, int base,
return 0;
}
-int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac)
{
__le32 *pmac = (__le32 *) mac;
u32 offset;
@@ -1056,7 +1061,7 @@ int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
return 0;
}
-int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
+int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac)
{
uint32_t crbaddr, mac_hi, mac_lo;
int pci_func = adapter->ahw.pci_func;
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h
index 3fd1dcb3583a..e2c5b6f2df03 100644
--- a/drivers/net/netxen/netxen_nic_hw.h
+++ b/drivers/net/netxen/netxen_nic_hw.h
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index ba62411f3532..1c63610ead42 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
@@ -184,6 +184,8 @@ skip_rds:
tx_ring = adapter->tx_ring;
vfree(tx_ring->cmd_buf_arr);
+ kfree(tx_ring);
+ adapter->tx_ring = NULL;
}
int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
@@ -785,7 +787,7 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
return 1;
- old_count = count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+ old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
for (i = 0; i < 10; i++) {
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index f13a07f717c7..08780ef1c1f8 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
@@ -100,7 +100,7 @@ static void netxen_config_indev_addr(struct net_device *dev, unsigned long);
{PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
-static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(netxen_pci_tbl) = {
ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
@@ -342,7 +342,7 @@ netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
if (!(first_boot & 0x4)) {
first_boot |= 0x4;
NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot);
- first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
+ NXRD32(adapter, NETXEN_PCIE_REG(0x4));
}
/* This is the first boot after power up */
@@ -432,7 +432,7 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
{
int i;
unsigned char *p;
- __le64 mac_addr;
+ u64 mac_addr;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
@@ -1952,12 +1952,8 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
linkup = (val == XG_LINK_UP_P3);
} else {
val = NXRD32(adapter, CRB_XG_STATE);
- if (adapter->ahw.port_type == NETXEN_NIC_GBE)
- linkup = (val >> port) & 1;
- else {
- val = (val >> port*8) & 0xff;
- linkup = (val == XG_LINK_UP);
- }
+ val = (val >> port*8) & 0xff;
+ linkup = (val == XG_LINK_UP);
}
netxen_advert_link_change(adapter, linkup);
@@ -1999,7 +1995,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
netif_wake_queue(adapter->netdev);
clear_bit(__NX_RESETTING, &adapter->state);
-
+ return;
} else {
clear_bit(__NX_RESETTING, &adapter->state);
if (!netxen_nic_reset_context(adapter)) {
@@ -2327,7 +2323,9 @@ netxen_detach_work(struct work_struct *work)
netxen_nic_down(adapter, netdev);
+ rtnl_lock();
netxen_nic_detach(adapter);
+ rtnl_unlock();
status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
@@ -2600,7 +2598,7 @@ netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
return size;
}
-ssize_t netxen_sysfs_write_mem(struct kobject *kobj,
+static ssize_t netxen_sysfs_write_mem(struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 8ce58c4c7dd3..af9a8647c7e8 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -58,7 +58,7 @@ static void writeq(u64 val, void __iomem *reg)
}
#endif
-static struct pci_device_id niu_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
{}
};
@@ -2844,7 +2844,7 @@ static int tcam_wait_bit(struct niu *np, u64 bit)
break;
udelay(1);
}
- if (limit < 0)
+ if (limit <= 0)
return -ENODEV;
return 0;
@@ -6372,7 +6372,7 @@ static void niu_set_rx_mode(struct net_device *dev)
if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
np->flags |= NIU_FLAGS_MCAST;
- alt_cnt = dev->uc.count;
+ alt_cnt = netdev_uc_count(dev);
if (alt_cnt > niu_num_alt_addr(np)) {
alt_cnt = 0;
np->flags |= NIU_FLAGS_PROMISC;
@@ -6381,7 +6381,7 @@ static void niu_set_rx_mode(struct net_device *dev)
if (alt_cnt) {
int index = 0;
- list_for_each_entry(ha, &dev->uc.list, list) {
+ netdev_for_each_uc_addr(ha, dev) {
err = niu_set_alt_mac(np, index, ha->addr);
if (err)
printk(KERN_WARNING PFX "%s: Error %d "
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 1f6327d41536..a3b6aa0f375d 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -2292,7 +2292,7 @@ static void __devexit ns83820_remove_one(struct pci_dev *pci_dev)
pci_set_drvdata(pci_dev, NULL);
}
-static struct pci_device_id ns83820_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ns83820_pci_tbl) = {
{ 0x100b, 0x0022, PCI_ANY_ID, PCI_ANY_ID, 0, .driver_data = 0, },
{ 0, },
};
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 1673eb045e1e..d44d4a208bbf 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -1875,7 +1875,7 @@ static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
free_netdev(netdev);
}
-static struct pci_device_id pasemi_mac_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pasemi_mac_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
{ },
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 480af402affd..20273832bfce 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -211,7 +211,7 @@ static struct {
};
-static struct pci_device_id netdrv_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(netdrv_pci_tbl) = {
{0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NETDRV_CB },
{0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMC1211TX },
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 813aca3fc433..7b17404d0858 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -717,6 +717,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = {
PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
PCMCIA_DEVICE_NULL,
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 8a5ae3b182ed..12e3233868e9 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1402,7 +1402,6 @@ static void BuildLAF(int *ladrf, int *adr)
for (i = 0; i < 8; i++)
printk(KERN_CONT " %02X", ladrf[i]);
printk(KERN_CONT "\n");
- }
#endif
} /* BuildLAF */
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 92ed3fbf89a5..776cad2f5715 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1741,7 +1741,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"),
PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
- PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"),
+ PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"),
@@ -1754,7 +1754,7 @@ MODULE_DEVICE_TABLE(pcmcia, pcnet_ids);
MODULE_FIRMWARE("cis/PCMLM28.cis");
MODULE_FIRMWARE("cis/DP83903.cis");
MODULE_FIRMWARE("cis/LA-PCM.cis");
-MODULE_FIRMWARE("PE520.cis");
+MODULE_FIRMWARE("cis/PE520.cis");
MODULE_FIRMWARE("cis/NE2K.cis");
MODULE_FIRMWARE("cis/PE-200.cis");
MODULE_FIRMWARE("cis/tamarack.cis");
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index dcc67a35e8f2..0dc7ff896eeb 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -45,6 +45,7 @@ static const char *const version =
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/moduleparam.h>
@@ -58,7 +59,7 @@ static const char *const version =
/*
* PCI device identifiers for "new style" Linux PCI Device Drivers
*/
-static struct pci_device_id pcnet32_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
@@ -1765,7 +1766,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
if (!is_valid_ether_addr(dev->perm_addr))
- memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
+ memset(dev->dev_addr, 0, ETH_ALEN);
if (pcnet32_debug & NETIF_MSG_PROBE) {
printk(" %pM", dev->dev_addr);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index c13cf64095b6..33c4b12a63ba 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -331,8 +331,8 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
bool clk125en = true;
/* Abort if we are using an untested phy. */
- if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 ||
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 ||
+ if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 &&
+ BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 &&
BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M)
return;
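
The fix above turns an always-true || chain into an && chain: no model can differ from all IDs under ||, so the early return never fired. A tiny sketch of the corrected logic, with placeholder IDs:

#define ID_A 1
#define ID_B 2
#define ID_C 3

/* returns nonzero only when the model matches none of the known IDs */
static int should_abort(int model)
{
	return model != ID_A && model != ID_B && model != ID_C;
}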
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 6f69b9ba0df8..65ed385c2ceb 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -63,6 +63,7 @@
#define MII_M1111_HWCFG_MODE_COPPER_RGMII 0xb
#define MII_M1111_HWCFG_MODE_FIBER_RGMII 0x3
#define MII_M1111_HWCFG_MODE_SGMII_NO_CLK 0x4
+#define MII_M1111_HWCFG_MODE_COPPER_RTBI 0x9
#define MII_M1111_HWCFG_FIBER_COPPER_AUTO 0x8000
#define MII_M1111_HWCFG_FIBER_COPPER_RES 0x2000
@@ -269,6 +270,43 @@ static int m88e1111_config_init(struct phy_device *phydev)
return err;
}
+ if (phydev->interface == PHY_INTERFACE_MODE_RTBI) {
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
+ if (temp < 0)
+ return temp;
+ temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY);
+ err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp);
+ if (err < 0)
+ return err;
+
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+ if (temp < 0)
+ return temp;
+ temp &= ~(MII_M1111_HWCFG_MODE_MASK | MII_M1111_HWCFG_FIBER_COPPER_RES);
+ temp |= 0x7 | MII_M1111_HWCFG_FIBER_COPPER_AUTO;
+ err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+ if (err < 0)
+ return err;
+
+ /* soft reset */
+ err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ if (err < 0)
+ return err;
+ do
+ temp = phy_read(phydev, MII_BMCR);
+ while (temp & BMCR_RESET);
+
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+ if (temp < 0)
+ return temp;
+ temp &= ~(MII_M1111_HWCFG_MODE_MASK | MII_M1111_HWCFG_FIBER_COPPER_RES);
+ temp |= MII_M1111_HWCFG_MODE_COPPER_RTBI | MII_M1111_HWCFG_FIBER_COPPER_AUTO;
+ err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+ if (err < 0)
+ return err;
+ }
+
+
err = phy_write(phydev, MII_BMCR, BMCR_RESET);
if (err < 0)
return err;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index bd4e8d72dc08..e17b70291bbc 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -264,6 +264,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
(phydev->phy_id & phydrv->phy_id_mask));
}
+#ifdef CONFIG_PM
+
static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
{
struct device_driver *drv = phydev->dev.driver;
@@ -295,34 +297,88 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
return true;
}
-/* Suspend and resume. Copied from platform_suspend and
- * platform_resume
- */
-static int mdio_bus_suspend(struct device * dev, pm_message_t state)
+static int mdio_bus_suspend(struct device *dev)
{
struct phy_driver *phydrv = to_phy_driver(dev->driver);
struct phy_device *phydev = to_phy_device(dev);
+ /*
+ * We must stop the state machine manually, otherwise it stops out of
+ * control, possibly with the phydev->lock held. Upon resume, netdev
+ * may call phy routines that try to grab the same lock, and that may
+ * lead to a deadlock.
+ */
+ if (phydev->attached_dev)
+ phy_stop_machine(phydev);
+
if (!mdio_bus_phy_may_suspend(phydev))
return 0;
+
return phydrv->suspend(phydev);
}
-static int mdio_bus_resume(struct device * dev)
+static int mdio_bus_resume(struct device *dev)
{
struct phy_driver *phydrv = to_phy_driver(dev->driver);
struct phy_device *phydev = to_phy_device(dev);
+ int ret;
if (!mdio_bus_phy_may_suspend(phydev))
+ goto no_resume;
+
+ ret = phydrv->resume(phydev);
+ if (ret < 0)
+ return ret;
+
+no_resume:
+ if (phydev->attached_dev)
+ phy_start_machine(phydev, NULL);
+
+ return 0;
+}
+
+static int mdio_bus_restore(struct device *dev)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+ struct net_device *netdev = phydev->attached_dev;
+ int ret;
+
+ if (!netdev)
return 0;
- return phydrv->resume(phydev);
+
+ ret = phy_init_hw(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* The PHY needs to renegotiate. */
+ phydev->link = 0;
+ phydev->state = PHY_UP;
+
+ phy_start_machine(phydev, NULL);
+
+ return 0;
}
+static struct dev_pm_ops mdio_bus_pm_ops = {
+ .suspend = mdio_bus_suspend,
+ .resume = mdio_bus_resume,
+ .freeze = mdio_bus_suspend,
+ .thaw = mdio_bus_resume,
+ .restore = mdio_bus_restore,
+};
+
+#define MDIO_BUS_PM_OPS (&mdio_bus_pm_ops)
+
+#else
+
+#define MDIO_BUS_PM_OPS NULL
+
+#endif /* CONFIG_PM */
+
struct bus_type mdio_bus_type = {
.name = "mdio_bus",
.match = mdio_bus_match,
- .suspend = mdio_bus_suspend,
- .resume = mdio_bus_resume,
+ .pm = MDIO_BUS_PM_OPS,
};
EXPORT_SYMBOL(mdio_bus_type);
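
The mdio_bus hunks above move from the legacy bus_type .suspend/.resume callbacks to a struct dev_pm_ops. A stripped-down sketch of that conversion for a hypothetical bus; the mdio code additionally wires .freeze/.thaw/.restore for hibernation:

#include <linux/device.h>
#include <linux/pm.h>

static int example_bus_suspend(struct device *dev) { return 0; }
static int example_bus_resume(struct device *dev)  { return 0; }

static struct dev_pm_ops example_bus_pm_ops = {
	.suspend = example_bus_suspend,
	.resume  = example_bus_resume,
};

struct bus_type example_bus_type = {
	.name = "example_bus",
	.pm   = &example_bus_pm_ops,	/* replaces .suspend/.resume */
};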
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index b0e9f9c51721..0295097d6c44 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -410,7 +410,6 @@ EXPORT_SYMBOL(phy_start_aneg);
static void phy_change(struct work_struct *work);
-static void phy_state_machine(struct work_struct *work);
/**
* phy_start_machine - start PHY state machine tracking
@@ -430,7 +429,6 @@ void phy_start_machine(struct phy_device *phydev,
{
phydev->adjust_state = handler;
- INIT_DELAYED_WORK(&phydev->state_queue, phy_state_machine);
schedule_delayed_work(&phydev->state_queue, HZ);
}
@@ -761,7 +759,7 @@ EXPORT_SYMBOL(phy_start);
* phy_state_machine - Handle the state machine
* @work: work_struct that describes the work to be done
*/
-static void phy_state_machine(struct work_struct *work)
+void phy_state_machine(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct phy_device *phydev =
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b10fedd82143..db1794546c56 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -177,6 +177,7 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
dev->state = PHY_DOWN;
mutex_init(&dev->lock);
+ INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
return dev;
}
@@ -276,6 +277,22 @@ int phy_device_register(struct phy_device *phydev)
EXPORT_SYMBOL(phy_device_register);
/**
+ * phy_find_first - finds the first PHY device on the bus
+ * @bus: the target MII bus
+ */
+struct phy_device *phy_find_first(struct mii_bus *bus)
+{
+ int addr;
+
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ if (bus->phy_map[addr])
+ return bus->phy_map[addr];
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(phy_find_first);
+
+/**
* phy_prepare_link - prepares the PHY layer to monitor link status
* @phydev: target phy_device struct
* @handler: callback function for link status change notifications
@@ -378,6 +395,20 @@ void phy_disconnect(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_disconnect);
+int phy_init_hw(struct phy_device *phydev)
+{
+ int ret;
+
+ if (!phydev->drv || !phydev->drv->config_init)
+ return 0;
+
+ ret = phy_scan_fixups(phydev);
+ if (ret < 0)
+ return ret;
+
+ return phydev->drv->config_init(phydev);
+}
+
/**
* phy_attach_direct - attach a network device to a given PHY device pointer
* @dev: network device to attach
@@ -425,21 +456,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
/* Do initial configuration here, now that
* we have certain key parameters
* (dev_flags and interface) */
- if (phydev->drv->config_init) {
- int err;
-
- err = phy_scan_fixups(phydev);
-
- if (err < 0)
- return err;
-
- err = phydev->drv->config_init(phydev);
-
- if (err < 0)
- return err;
- }
-
- return 0;
+ return phy_init_hw(phydev);
}
EXPORT_SYMBOL(phy_attach_direct);
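
With the new phy_find_first() helper above, a MAC driver can locate the first PHY on its MII bus without hard-coding an address. A hedged sketch, assuming a hypothetical adjust_link callback and the five-argument phy_connect() of this kernel version:

#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/device.h>

static struct phy_device *example_attach_phy(struct mii_bus *bus,
					     struct net_device *ndev,
					     void (*adjust_link)(struct net_device *))
{
	struct phy_device *phydev = phy_find_first(bus);

	if (!phydev)
		return NULL;

	/* flags = 0, interface mode is an assumption for the example */
	return phy_connect(ndev, dev_name(&phydev->dev), adjust_link,
			   0, PHY_INTERFACE_MODE_MII);
}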
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 5123bb954dd7..ed2644a57500 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -25,6 +25,7 @@
#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
#define MII_LAN83C185_IM 30 /* Interrupt Mask */
+#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */
#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
@@ -37,8 +38,10 @@
#define MII_LAN83C185_ISF_INT_ALL (0x0e)
#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
- (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4)
+ (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \
+ MII_LAN83C185_ISF_INT7)
+#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */
static int smsc_phy_config_intr(struct phy_device *phydev)
{
@@ -59,9 +62,23 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
static int smsc_phy_config_init(struct phy_device *phydev)
{
+ int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+ if (rc < 0)
+ return rc;
+
+ /* Enable energy detect mode for these SMSC transceivers */
+ rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+ rc | MII_LAN83C185_EDPWRDOWN);
+ if (rc < 0)
+ return rc;
+
return smsc_phy_ack_interrupt (phydev);
}
+static int lan911x_config_init(struct phy_device *phydev)
+{
+ return smsc_phy_ack_interrupt(phydev);
+}
static struct phy_driver lan83c185_driver = {
.phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
@@ -147,7 +164,7 @@ static struct phy_driver lan911x_int_driver = {
/* basic functions */
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
- .config_init = smsc_phy_config_init,
+ .config_init = lan911x_config_init,
/* IRQ related */
.ack_interrupt = smsc_phy_ack_interrupt,
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 2282e729edbe..6d61602208c1 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -167,7 +167,7 @@ struct channel {
u8 avail; /* flag used in multilink stuff */
u8 had_frag; /* >= 1 fragments have been sent */
u32 lastseq; /* MP: last sequence # received */
- int speed; /* speed of the corresponding ppp channel*/
+ int speed; /* speed of the corresponding ppp channel*/
#endif /* CONFIG_PPP_MULTILINK */
};
@@ -1293,13 +1293,13 @@ ppp_push(struct ppp *ppp)
*/
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
{
- int len, totlen;
- int i, bits, hdrlen, mtu;
- int flen;
- int navail, nfree, nzero;
- int nbigger;
- int totspeed;
- int totfree;
+ int len, totlen;
+ int i, bits, hdrlen, mtu;
+ int flen;
+ int navail, nfree, nzero;
+ int nbigger;
+ int totspeed;
+ int totfree;
unsigned char *p, *q;
struct list_head *list;
struct channel *pch;
@@ -1307,21 +1307,21 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
struct ppp_channel *chan;
totspeed = 0; /*total bitrate of the bundle*/
- nfree = 0; /* # channels which have no packet already queued */
- navail = 0; /* total # of usable channels (not deregistered) */
- nzero = 0; /* number of channels with zero speed associated*/
- totfree = 0; /*total # of channels available and
+ nfree = 0; /* # channels which have no packet already queued */
+ navail = 0; /* total # of usable channels (not deregistered) */
+ nzero = 0; /* number of channels with zero speed associated*/
+ totfree = 0; /*total # of channels available and
*having no queued packets before
*starting the fragmentation*/
hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
- i = 0;
- list_for_each_entry(pch, &ppp->channels, clist) {
+ i = 0;
+ list_for_each_entry(pch, &ppp->channels, clist) {
navail += pch->avail = (pch->chan != NULL);
pch->speed = pch->chan->speed;
- if (pch->avail) {
+ if (pch->avail) {
if (skb_queue_empty(&pch->file.xq) ||
- !pch->had_frag) {
+ !pch->had_frag) {
if (pch->speed == 0)
nzero++;
else
@@ -1331,60 +1331,60 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
++nfree;
++totfree;
}
- if (!pch->had_frag && i < ppp->nxchan)
- ppp->nxchan = i;
+ if (!pch->had_frag && i < ppp->nxchan)
+ ppp->nxchan = i;
}
++i;
}
/*
- * Don't start sending this packet unless at least half of
- * the channels are free. This gives much better TCP
- * performance if we have a lot of channels.
+ * Don't start sending this packet unless at least half of
+ * the channels are free. This gives much better TCP
+ * performance if we have a lot of channels.
*/
- if (nfree == 0 || nfree < navail / 2)
- return 0; /* can't take now, leave it in xmit_pending */
+ if (nfree == 0 || nfree < navail / 2)
+ return 0; /* can't take now, leave it in xmit_pending */
/* Do protocol field compression (XXX this should be optional) */
- p = skb->data;
- len = skb->len;
+ p = skb->data;
+ len = skb->len;
if (*p == 0) {
++p;
--len;
}
totlen = len;
- nbigger = len % nfree;
+ nbigger = len % nfree;
- /* skip to the channel after the one we last used
- and start at that one */
+ /* skip to the channel after the one we last used
+ and start at that one */
list = &ppp->channels;
- for (i = 0; i < ppp->nxchan; ++i) {
+ for (i = 0; i < ppp->nxchan; ++i) {
list = list->next;
- if (list == &ppp->channels) {
- i = 0;
+ if (list == &ppp->channels) {
+ i = 0;
break;
}
}
- /* create a fragment for each channel */
+ /* create a fragment for each channel */
bits = B;
- while (len > 0) {
+ while (len > 0) {
list = list->next;
- if (list == &ppp->channels) {
- i = 0;
+ if (list == &ppp->channels) {
+ i = 0;
continue;
}
- pch = list_entry(list, struct channel, clist);
+ pch = list_entry(list, struct channel, clist);
++i;
if (!pch->avail)
continue;
/*
- * Skip this channel if it has a fragment pending already and
- * we haven't given a fragment to all of the free channels.
+ * Skip this channel if it has a fragment pending already and
+ * we haven't given a fragment to all of the free channels.
*/
if (pch->avail == 1) {
- if (nfree > 0)
+ if (nfree > 0)
continue;
} else {
pch->avail = 1;
@@ -1393,32 +1393,32 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
/* check the channel's mtu and whether it is still attached. */
spin_lock_bh(&pch->downl);
if (pch->chan == NULL) {
- /* can't use this channel, it's being deregistered */
+ /* can't use this channel, it's being deregistered */
if (pch->speed == 0)
nzero--;
else
- totspeed -= pch->speed;
+ totspeed -= pch->speed;
spin_unlock_bh(&pch->downl);
pch->avail = 0;
totlen = len;
totfree--;
nfree--;
- if (--navail == 0)
+ if (--navail == 0)
break;
continue;
}
/*
*if the channel speed is not set divide
- *the packet evenly among the free channels;
+ *the packet evenly among the free channels;
*otherwise divide it according to the speed
*of the channel we are going to transmit on
*/
flen = len;
if (nfree > 0) {
if (pch->speed == 0) {
- flen = totlen/nfree ;
+ flen = totlen/nfree;
if (nbigger > 0) {
flen++;
nbigger--;
@@ -1436,8 +1436,8 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
}
/*
- *check if we are on the last channel or
- *we exceded the lenght of the data to
+ *check if we are on the last channel or
+ *we exceeded the length of the data to
*fragment
*/
if ((nfree <= 0) || (flen > len))
@@ -1448,29 +1448,29 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
*above formula will be equal or less than zero.
*Skip the channel in this case
*/
- if (flen <= 0) {
+ if (flen <= 0) {
pch->avail = 2;
spin_unlock_bh(&pch->downl);
continue;
}
- mtu = pch->chan->mtu - hdrlen;
- if (mtu < 4)
- mtu = 4;
+ mtu = pch->chan->mtu - hdrlen;
+ if (mtu < 4)
+ mtu = 4;
if (flen > mtu)
flen = mtu;
- if (flen == len)
- bits |= E;
- frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
+ if (flen == len)
+ bits |= E;
+ frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
if (!frag)
goto noskb;
- q = skb_put(frag, flen + hdrlen);
+ q = skb_put(frag, flen + hdrlen);
- /* make the MP header */
+ /* make the MP header */
q[0] = PPP_MP >> 8;
q[1] = PPP_MP;
if (ppp->flags & SC_MP_XSHORTSEQ) {
- q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
+ q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
q[3] = ppp->nxseq;
} else {
q[2] = bits;
@@ -1483,24 +1483,24 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
/* try to send it down the channel */
chan = pch->chan;
- if (!skb_queue_empty(&pch->file.xq) ||
+ if (!skb_queue_empty(&pch->file.xq) ||
!chan->ops->start_xmit(chan, frag))
skb_queue_tail(&pch->file.xq, frag);
- pch->had_frag = 1;
+ pch->had_frag = 1;
p += flen;
- len -= flen;
+ len -= flen;
++ppp->nxseq;
bits = 0;
spin_unlock_bh(&pch->downl);
}
- ppp->nxchan = i;
+ ppp->nxchan = i;
return 1;
noskb:
spin_unlock_bh(&pch->downl);
if (ppp->debug & 1)
- printk(KERN_ERR "PPP: no memory (fragment)\n");
+ printk(KERN_ERR "PPP: no memory (fragment)\n");
++ppp->dev->stats.tx_errors;
++ppp->nxseq;
return 1; /* abandon the frame */
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index 227b141c4fbd..2663b2fdc0bb 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -1389,113 +1389,6 @@ static int gelic_wl_get_mode(struct net_device *netdev,
return 0;
}
-#ifdef CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE
-/* SIOCIWFIRSTPRIV */
-static int hex2bin(u8 *str, u8 *bin, unsigned int len)
-{
- unsigned int i;
- static unsigned char *hex = "0123456789ABCDEF";
- unsigned char *p, *q;
- u8 tmp;
-
- if (len != WPA_PSK_LEN * 2)
- return -EINVAL;
-
- for (i = 0; i < WPA_PSK_LEN * 2; i += 2) {
- p = strchr(hex, toupper(str[i]));
- q = strchr(hex, toupper(str[i + 1]));
- if (!p || !q) {
- pr_info("%s: unconvertible PSK digit=%d\n",
- __func__, i);
- return -EINVAL;
- }
- tmp = ((p - hex) << 4) + (q - hex);
- *bin++ = tmp;
- }
- return 0;
-};
-
-static int gelic_wl_priv_set_psk(struct net_device *net_dev,
- struct iw_request_info *info,
- union iwreq_data *data, char *extra)
-{
- struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev));
- unsigned int len;
- unsigned long irqflag;
- int ret = 0;
-
- pr_debug("%s:<- len=%d\n", __func__, data->data.length);
- len = data->data.length - 1;
- if (len <= 2)
- return -EINVAL;
-
- spin_lock_irqsave(&wl->lock, irqflag);
- if (extra[0] == '"' && extra[len - 1] == '"') {
- pr_debug("%s: passphrase mode\n", __func__);
- /* pass phrase */
- if (GELIC_WL_EURUS_PSK_MAX_LEN < (len - 2)) {
- pr_info("%s: passphrase too long\n", __func__);
- ret = -E2BIG;
- goto out;
- }
- memset(wl->psk, 0, sizeof(wl->psk));
- wl->psk_len = len - 2;
- memcpy(wl->psk, &(extra[1]), wl->psk_len);
- wl->psk_type = GELIC_EURUS_WPA_PSK_PASSPHRASE;
- } else {
- ret = hex2bin(extra, wl->psk, len);
- if (ret)
- goto out;
- wl->psk_len = WPA_PSK_LEN;
- wl->psk_type = GELIC_EURUS_WPA_PSK_BIN;
- }
- set_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat);
-out:
- spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s:->\n", __func__);
- return ret;
-}
-
-static int gelic_wl_priv_get_psk(struct net_device *net_dev,
- struct iw_request_info *info,
- union iwreq_data *data, char *extra)
-{
- struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev));
- char *p;
- unsigned long irqflag;
- unsigned int i;
-
- pr_debug("%s:<-\n", __func__);
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- spin_lock_irqsave(&wl->lock, irqflag);
- p = extra;
- if (test_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat)) {
- if (wl->psk_type == GELIC_EURUS_WPA_PSK_BIN) {
- for (i = 0; i < wl->psk_len; i++) {
- sprintf(p, "%02xu", wl->psk[i]);
- p += 2;
- }
- *p = '\0';
- data->data.length = wl->psk_len * 2;
- } else {
- *p++ = '"';
- memcpy(p, wl->psk, wl->psk_len);
- p += wl->psk_len;
- *p++ = '"';
- *p = '\0';
- data->data.length = wl->psk_len + 2;
- }
- } else
- /* no psk set */
- data->data.length = 0;
- spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s:-> %d\n", __func__, data->data.length);
- return 0;
-}
-#endif
-
/* SIOCGIWNICKN */
static int gelic_wl_get_nick(struct net_device *net_dev,
struct iw_request_info *info,
@@ -1571,8 +1464,10 @@ static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan,
init_completion(&wl->scan_done);
/*
* If we have already a bss list, don't try to get new
+ * unless we are doing an ESSID scan
*/
- if (!always_scan && wl->scan_stat == GELIC_WL_SCAN_STAT_GOT_LIST) {
+ if ((!essid_len && !always_scan)
+ && wl->scan_stat == GELIC_WL_SCAN_STAT_GOT_LIST) {
pr_debug("%s: already has the list\n", __func__);
complete(&wl->scan_done);
goto out;
@@ -1673,7 +1568,7 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
}
}
- /* put them in the newtork_list */
+ /* put them in the network_list */
for (i = 0, scan_info_size = 0, scan_info = buf;
scan_info_size < data_len;
i++, scan_info_size += be16_to_cpu(scan_info->size),
@@ -2009,7 +1904,7 @@ static int gelic_wl_do_wpa_setup(struct gelic_wl_info *wl)
/* PSK type */
wpa->psk_type = cpu_to_be16(wl->psk_type);
#ifdef DEBUG
- pr_debug("%s: sec=%s psktype=%s\nn", __func__,
+ pr_debug("%s: sec=%s psktype=%s\n", __func__,
wpasecstr(wpa->security),
(wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ?
"BIN" : "passphrase");
@@ -2019,9 +1914,9 @@ static int gelic_wl_do_wpa_setup(struct gelic_wl_info *wl)
* the debug log because this dumps your precious
* passphrase/key.
*/
- pr_debug("%s: psk=%s\n",
+ pr_debug("%s: psk=%s\n", __func__,
(wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ?
- (char *)"N/A" : (char *)wpa->psk);
+ "N/A" : wpa->psk);
#endif
#endif
/* issue wpa setup */
@@ -2406,40 +2301,10 @@ static const iw_handler gelic_wl_wext_handler[] =
IW_IOCTL(SIOCGIWNICKN) = gelic_wl_get_nick,
};
-#ifdef CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE
-static struct iw_priv_args gelic_wl_private_args[] =
-{
- {
- .cmd = GELIC_WL_PRIV_SET_PSK,
- .set_args = IW_PRIV_TYPE_CHAR |
- (GELIC_WL_EURUS_PSK_MAX_LEN + 2),
- .name = "set_psk"
- },
- {
- .cmd = GELIC_WL_PRIV_GET_PSK,
- .get_args = IW_PRIV_TYPE_CHAR |
- (GELIC_WL_EURUS_PSK_MAX_LEN + 2),
- .name = "get_psk"
- }
-};
-
-static const iw_handler gelic_wl_private_handler[] =
-{
- gelic_wl_priv_set_psk,
- gelic_wl_priv_get_psk,
-};
-#endif
-
static const struct iw_handler_def gelic_wl_wext_handler_def = {
.num_standard = ARRAY_SIZE(gelic_wl_wext_handler),
.standard = gelic_wl_wext_handler,
.get_wireless_stats = gelic_wl_get_wireless_stats,
-#ifdef CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE
- .num_private = ARRAY_SIZE(gelic_wl_private_handler),
- .num_private_args = ARRAY_SIZE(gelic_wl_private_args),
- .private = gelic_wl_private_handler,
- .private_args = gelic_wl_private_args,
-#endif
};
static struct net_device * __devinit gelic_wl_alloc(struct gelic_card *card)
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index dd35066a7f8d..4ef0afbcbe1b 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -61,7 +61,7 @@ static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
-static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
/* required last entry */
@@ -4087,7 +4087,6 @@ static void __devexit ql3xxx_remove(struct pci_dev *pdev)
struct ql3_adapter *qdev = netdev_priv(ndev);
unregister_netdev(ndev);
- qdev = netdev_priv(ndev);
ql_disable_interrupts(qdev);
diff --git a/drivers/net/qlcnic/Makefile b/drivers/net/qlcnic/Makefile
new file mode 100644
index 000000000000..ddba83ef3f44
--- /dev/null
+++ b/drivers/net/qlcnic/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for Qlogic 1G/10G Ethernet Driver for CNA devices
+#
+
+obj-$(CONFIG_QLCNIC) := qlcnic.o
+
+qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
+ qlcnic_ethtool.o qlcnic_ctx.o
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
new file mode 100644
index 000000000000..b40a851ec7d1
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -0,0 +1,1126 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#ifndef _QLCNIC_H_
+#define _QLCNIC_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/timer.h>
+
+#include <linux/vmalloc.h>
+
+#include <linux/io.h>
+#include <asm/byteorder.h>
+
+#include "qlcnic_hdr.h"
+
+#define _QLCNIC_LINUX_MAJOR 5
+#define _QLCNIC_LINUX_MINOR 0
+#define _QLCNIC_LINUX_SUBVERSION 0
+#define QLCNIC_LINUX_VERSIONID "5.0.0"
+
+#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
+#define _major(v) (((v) >> 24) & 0xff)
+#define _minor(v) (((v) >> 16) & 0xff)
+#define _build(v) ((v) & 0xffff)
+
+/* version in image has weird encoding:
+ * 7:0 - major
+ * 15:8 - minor
+ * 31:16 - build (little endian)
+ */
+#define QLCNIC_DECODE_VERSION(v) \
+ QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
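For illustration only, the decode above can be exercised in a small user-space sketch; the sample word 0x00020005 is invented, not taken from any real flash image:

#include <stdio.h>
#include <stdint.h>

#define QLCNIC_VERSION_CODE(a, b, c)	(((a) << 24) + ((b) << 16) + (c))
#define QLCNIC_DECODE_VERSION(v) \
	QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))

int main(void)
{
	uint32_t raw = 0x00020005;	/* image encoding: major 5, minor 0, build 2 */
	uint32_t v = QLCNIC_DECODE_VERSION(raw);

	/* prints "5.0.2" once re-packed into the driver's own 24/16-bit layout */
	printf("%u.%u.%u\n", (v >> 24) & 0xff, (v >> 16) & 0xff, v & 0xffff);
	return 0;
}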
+
+#define QLCNIC_NUM_FLASH_SECTORS (64)
+#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024)
+#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \
+ * QLCNIC_FLASH_SECTOR_SIZE)
+
+#define RCV_DESC_RINGSIZE(rds_ring) \
+ (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
+#define RCV_BUFF_RINGSIZE(rds_ring) \
+ (sizeof(struct qlcnic_rx_buffer) * rds_ring->num_desc)
+#define STATUS_DESC_RINGSIZE(sds_ring) \
+ (sizeof(struct status_desc) * (sds_ring)->num_desc)
+#define TX_BUFF_RINGSIZE(tx_ring) \
+ (sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc)
+#define TX_DESC_RINGSIZE(tx_ring) \
+ (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
+
+#define QLCNIC_P3P_A0 0x50
+
+#define QLCNIC_IS_REVISION_P3P(REVISION) (REVISION >= QLCNIC_P3P_A0)
+
+#define FIRST_PAGE_GROUP_START 0
+#define FIRST_PAGE_GROUP_END 0x100000
+
+#define P3_MAX_MTU (9600)
+#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */
+
+#define QLCNIC_P3_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN)
+#define QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3_MAX_MTU)
+#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
+#define QLCNIC_LRO_BUFFER_EXTRA 2048
+
+#define QLCNIC_RX_LRO_BUFFER_LENGTH (8060)
+
+/* Opcodes to be used with the commands */
+#define TX_ETHER_PKT 0x01
+#define TX_TCP_PKT 0x02
+#define TX_UDP_PKT 0x03
+#define TX_IP_PKT 0x04
+#define TX_TCP_LSO 0x05
+#define TX_TCP_LSO6 0x06
+#define TX_IPSEC 0x07
+#define TX_IPSEC_CMD 0x0a
+#define TX_TCPV6_PKT 0x0b
+#define TX_UDPV6_PKT 0x0c
+
+/* Tx defines */
+#define MAX_BUFFERS_PER_CMD 32
+#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + 4)
+#define QLCNIC_MAX_TX_TIMEOUTS 2
+
+/*
+ * Following are the states of the Phantom. Phantom will set them and
+ * Host will read to check if the fields are correct.
+ */
+#define PHAN_INITIALIZE_FAILED 0xffff
+#define PHAN_INITIALIZE_COMPLETE 0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK 0xf00f
+#define PHAN_PEG_RCV_INITIALIZED 0xff01
+
+#define NUM_RCV_DESC_RINGS 3
+#define NUM_STS_DESC_RINGS 4
+
+#define RCV_RING_NORMAL 0
+#define RCV_RING_JUMBO 1
+#define RCV_RING_LRO 2
+
+#define MIN_CMD_DESCRIPTORS 64
+#define MIN_RCV_DESCRIPTORS 64
+#define MIN_JUMBO_DESCRIPTORS 32
+
+#define MAX_CMD_DESCRIPTORS 1024
+#define MAX_RCV_DESCRIPTORS_1G 4096
+#define MAX_RCV_DESCRIPTORS_10G 8192
+#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
+#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
+#define MAX_LRO_RCV_DESCRIPTORS 8
+
+#define DEFAULT_RCV_DESCRIPTORS_1G 2048
+#define DEFAULT_RCV_DESCRIPTORS_10G 4096
+
+#define get_next_index(index, length) \
+ (((index) + 1) & ((length) - 1))
+
+#define MPORT_MULTI_FUNCTION_MODE 0x2222
+
+/*
+ * Following data structures describe the descriptors that will be used.
+ * Added fields of tcpHdrSize and ipHdrSize; the driver needs to fill them
+ * only when doing LSO (packets above the 1500-byte size).
+ */
+
+#define FLAGS_VLAN_TAGGED 0x10
+#define FLAGS_VLAN_OOB 0x40
+
+#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
+ (cmd_desc)->vlan_TCI = cpu_to_le16(v);
+#define qlcnic_set_cmd_desc_port(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
+#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
+
+#define qlcnic_set_tx_port(_desc, _port) \
+ ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
+
+#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
+ ((_desc)->flags_opcode = \
+ cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
+
+#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
+ ((_desc)->nfrags__length = \
+ cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
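Leaving the cpu_to_le16()/cpu_to_le32() wrappers aside, the bit packing done by qlcnic_set_tx_flags_opcode() and qlcnic_set_tx_frags_len() can be checked in isolation; the flag, opcode, fragment count and length values below are invented for illustration:

#include <assert.h>
#include <stdint.h>

#define FLAGS_VLAN_OOB	0x40
#define TX_TCP_LSO	0x05

int main(void)
{
	/* flags live in bits 6:0, opcode in bits 12:7 of flags_opcode */
	uint16_t flags_opcode = (FLAGS_VLAN_OOB & 0x7f) | ((TX_TCP_LSO & 0x3f) << 7);

	/* frag count lives in bits 7:0, total length in bits 31:8 */
	uint32_t nfrags_length = (3u & 0xff) | ((1514u & 0xffffff) << 8);

	assert(((flags_opcode >> 7) & 0x3f) == TX_TCP_LSO);
	assert((nfrags_length >> 8) == 1514);
	assert((nfrags_length & 0xff) == 3);
	return 0;
}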
+
+struct cmd_desc_type0 {
+ u8 tcp_hdr_offset; /* For LSO only */
+ u8 ip_hdr_offset; /* For LSO only */
+ __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */
+ __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */
+
+ __le64 addr_buffer2;
+
+ __le16 reference_handle;
+ __le16 mss;
+ u8 port_ctxid; /* 7:4 ctxid 3:0 port */
+ u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
+	__le16 conn_id;		/* IPSec offload only */
+
+ __le64 addr_buffer3;
+ __le64 addr_buffer1;
+
+ __le16 buffer_length[4];
+
+ __le64 addr_buffer4;
+
+ __le32 reserved2;
+ __le16 reserved;
+ __le16 vlan_TCI;
+
+} __attribute__ ((aligned(64)));
+
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
+struct rcv_desc {
+ __le16 reference_handle;
+ __le16 reserved;
+ __le32 buffer_length; /* allocated buffer length (usually 2K) */
+ __le64 addr_buffer;
+};
+
+/* opcode field in status_desc */
+#define QLCNIC_SYN_OFFLOAD 0x03
+#define QLCNIC_RXPKT_DESC 0x04
+#define QLCNIC_OLD_RXPKT_DESC 0x3f
+#define QLCNIC_RESPONSE_DESC 0x05
+#define QLCNIC_LRO_DESC 0x12
+
+/* for status field in status_desc */
+#define STATUS_CKSUM_OK (2)
+
+/* owner bits of status_desc */
+#define STATUS_OWNER_HOST (0x1ULL << 56)
+#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
+
+/* Status descriptor:
+ 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+ 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
+ 53-55 desc_cnt, 56-57 owner, 58-63 opcode
+ */
+#define qlcnic_get_sts_port(sts_data) \
+ ((sts_data) & 0x0F)
+#define qlcnic_get_sts_status(sts_data) \
+ (((sts_data) >> 4) & 0x0F)
+#define qlcnic_get_sts_type(sts_data) \
+ (((sts_data) >> 8) & 0x0F)
+#define qlcnic_get_sts_totallength(sts_data) \
+ (((sts_data) >> 12) & 0xFFFF)
+#define qlcnic_get_sts_refhandle(sts_data) \
+ (((sts_data) >> 28) & 0xFFFF)
+#define qlcnic_get_sts_prot(sts_data) \
+ (((sts_data) >> 44) & 0x0F)
+#define qlcnic_get_sts_pkt_offset(sts_data) \
+ (((sts_data) >> 48) & 0x1F)
+#define qlcnic_get_sts_desc_cnt(sts_data) \
+ (((sts_data) >> 53) & 0x7)
+#define qlcnic_get_sts_opcode(sts_data) \
+ (((sts_data) >> 58) & 0x03F)
+
+#define qlcnic_get_lro_sts_refhandle(sts_data) \
+ ((sts_data) & 0x0FFFF)
+#define qlcnic_get_lro_sts_length(sts_data) \
+ (((sts_data) >> 16) & 0x0FFFF)
+#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \
+ (((sts_data) >> 32) & 0x0FF)
+#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \
+ (((sts_data) >> 40) & 0x0FF)
+#define qlcnic_get_lro_sts_timestamp(sts_data) \
+ (((sts_data) >> 48) & 0x1)
+#define qlcnic_get_lro_sts_type(sts_data) \
+ (((sts_data) >> 49) & 0x7)
+#define qlcnic_get_lro_sts_push_flag(sts_data) \
+ (((sts_data) >> 52) & 0x1)
+#define qlcnic_get_lro_sts_seq_number(sts_data) \
+ ((sts_data) & 0x0FFFFFFFF)
+
+
+struct status_desc {
+ __le64 status_desc_data[2];
+} __attribute__ ((aligned(16)));
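A quick way to read the layout comment above is to build a synthetic status word and pull the fields back out with the same shifts and masks the macros use; the field values here are made up:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t sts = 0;

	sts |= (uint64_t)0x2 << 0;	/* port 2 */
	sts |= (uint64_t)0x2 << 4;	/* status: STATUS_CKSUM_OK */
	sts |= (uint64_t)1514 << 12;	/* total length */
	sts |= (uint64_t)0x04 << 58;	/* opcode: QLCNIC_RXPKT_DESC */

	assert((sts & 0x0F) == 2);		/* qlcnic_get_sts_port */
	assert(((sts >> 4) & 0x0F) == 2);	/* qlcnic_get_sts_status */
	assert(((sts >> 12) & 0xFFFF) == 1514);	/* qlcnic_get_sts_totallength */
	assert(((sts >> 58) & 0x3F) == 0x04);	/* qlcnic_get_sts_opcode */
	return 0;
}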
+
+/* UNIFIED ROMIMAGE */
+#define QLCNIC_UNI_FW_MIN_SIZE 0xc8000
+#define QLCNIC_UNI_DIR_SECT_PRODUCT_TBL 0x0
+#define QLCNIC_UNI_DIR_SECT_BOOTLD 0x6
+#define QLCNIC_UNI_DIR_SECT_FW 0x7
+
+/* Offsets */
+#define QLCNIC_UNI_CHIP_REV_OFF 10
+#define QLCNIC_UNI_FLAGS_OFF 11
+#define QLCNIC_UNI_BIOS_VERSION_OFF 12
+#define QLCNIC_UNI_BOOTLD_IDX_OFF 27
+#define QLCNIC_UNI_FIRMWARE_IDX_OFF 29
+
+struct uni_table_desc{
+ u32 findex;
+ u32 num_entries;
+ u32 entry_size;
+ u32 reserved[5];
+};
+
+struct uni_data_desc{
+ u32 findex;
+ u32 size;
+ u32 reserved[5];
+};
+
+/* Magic number to let user know flash is programmed */
+#define QLCNIC_BDINFO_MAGIC 0x12345678
+
+#define QLCNIC_BRDTYPE_P3_REF_QG 0x0021
+#define QLCNIC_BRDTYPE_P3_HMEZ 0x0022
+#define QLCNIC_BRDTYPE_P3_10G_CX4_LP 0x0023
+#define QLCNIC_BRDTYPE_P3_4_GB 0x0024
+#define QLCNIC_BRDTYPE_P3_IMEZ 0x0025
+#define QLCNIC_BRDTYPE_P3_10G_SFP_PLUS 0x0026
+#define QLCNIC_BRDTYPE_P3_10000_BASE_T 0x0027
+#define QLCNIC_BRDTYPE_P3_XG_LOM 0x0028
+#define QLCNIC_BRDTYPE_P3_4_GB_MM 0x0029
+#define QLCNIC_BRDTYPE_P3_10G_SFP_CT 0x002a
+#define QLCNIC_BRDTYPE_P3_10G_SFP_QT 0x002b
+#define QLCNIC_BRDTYPE_P3_10G_CX4 0x0031
+#define QLCNIC_BRDTYPE_P3_10G_XFP 0x0032
+#define QLCNIC_BRDTYPE_P3_10G_TP 0x0080
+
+/* Flash memory map */
+#define QLCNIC_BRDCFG_START 0x4000 /* board config */
+#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
+#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
+#define QLCNIC_USER_START		0x3E8000	/* Firmware info */
+
+#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
+#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
+#define QLCNIC_FW_SERIAL_NUM_OFFSET (QLCNIC_USER_START+0x81c)
+#define QLCNIC_BIOS_VERSION_OFFSET (QLCNIC_USER_START+0x83c)
+
+#define QLCNIC_BRDTYPE_OFFSET (QLCNIC_BRDCFG_START+0x8)
+#define QLCNIC_FW_MAGIC_OFFSET (QLCNIC_BRDCFG_START+0x128)
+
+#define QLCNIC_FW_MIN_SIZE (0x3fffff)
+#define QLCNIC_UNIFIED_ROMIMAGE 0
+#define QLCNIC_FLASH_ROMIMAGE 1
+#define QLCNIC_UNKNOWN_ROMIMAGE 0xff
+
+#define QLCNIC_UNIFIED_ROMIMAGE_NAME "phanfw.bin"
+#define QLCNIC_FLASH_ROMIMAGE_NAME "flash"
+
+extern char qlcnic_driver_name[];
+
+/* Number of status descriptors to handle per interrupt */
+#define MAX_STATUS_HANDLE (64)
+
+/*
+ * qlcnic_skb_frag{} contains the mapping info for each SG list entry. This
+ * has to be freed when DMA is complete. It is part of qlcnic_cmd_buffer{}.
+ */
+struct qlcnic_skb_frag {
+ u64 dma;
+ u64 length;
+};
+
+struct qlcnic_recv_crb {
+ u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
+ u32 crb_sts_consumer[NUM_STS_DESC_RINGS];
+ u32 sw_int_mask[NUM_STS_DESC_RINGS];
+};
+
+/* Following defines are for the state of the buffers */
+#define QLCNIC_BUFFER_FREE 0
+#define QLCNIC_BUFFER_BUSY 1
+
+/*
+ * There will be one qlcnic_cmd_buffer per skb packet. These will be
+ * used to save the dma info for pci_unmap_page()
+ */
+struct qlcnic_cmd_buffer {
+ struct sk_buff *skb;
+ struct qlcnic_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
+ u32 frag_count;
+};
+
+/* In rx_buffer, we do not need multiple fragments since it is a single buffer */
+struct qlcnic_rx_buffer {
+ struct list_head list;
+ struct sk_buff *skb;
+ u64 dma;
+ u16 ref_handle;
+ u16 state;
+};
+
+/* Board types */
+#define QLCNIC_GBE 0x01
+#define QLCNIC_XGBE 0x02
+
+/*
+ * One hardware_context{} per adapter
+ * contains interrupt info as well as shared hardware info.
+ */
+struct qlcnic_hardware_context {
+ void __iomem *pci_base0;
+ void __iomem *ocm_win_crb;
+
+ unsigned long pci_len0;
+
+ u32 ocm_win;
+ u32 crb_win;
+
+ rwlock_t crb_lock;
+ struct mutex mem_lock;
+
+ u8 cut_through;
+ u8 revision_id;
+ u8 pci_func;
+ u8 linkup;
+ u16 port_type;
+ u16 board_type;
+};
+
+struct qlcnic_adapter_stats {
+ u64 xmitcalled;
+ u64 xmitfinished;
+ u64 rxdropped;
+ u64 txdropped;
+ u64 csummed;
+ u64 rx_pkts;
+ u64 lro_pkts;
+ u64 rxbytes;
+ u64 txbytes;
+};
+
+/*
+ * Rcv Descriptor Context. One such per Rcv Descriptor. There may
+ * be one Rcv Descriptor for normal packets, one for jumbo, and possibly others.
+ */
+struct qlcnic_host_rds_ring {
+ u32 producer;
+ u32 num_desc;
+ u32 dma_size;
+ u32 skb_size;
+ u32 flags;
+ void __iomem *crb_rcv_producer;
+ struct rcv_desc *desc_head;
+ struct qlcnic_rx_buffer *rx_buf_arr;
+ struct list_head free_list;
+ spinlock_t lock;
+ dma_addr_t phys_addr;
+};
+
+struct qlcnic_host_sds_ring {
+ u32 consumer;
+ u32 num_desc;
+ void __iomem *crb_sts_consumer;
+ void __iomem *crb_intr_mask;
+
+ struct status_desc *desc_head;
+ struct qlcnic_adapter *adapter;
+ struct napi_struct napi;
+ struct list_head free_list[NUM_RCV_DESC_RINGS];
+
+ int irq;
+
+ dma_addr_t phys_addr;
+ char name[IFNAMSIZ+4];
+};
+
+struct qlcnic_host_tx_ring {
+ u32 producer;
+ __le32 *hw_consumer;
+ u32 sw_consumer;
+ void __iomem *crb_cmd_producer;
+ u32 num_desc;
+
+ struct netdev_queue *txq;
+
+ struct qlcnic_cmd_buffer *cmd_buf_arr;
+ struct cmd_desc_type0 *desc_head;
+ dma_addr_t phys_addr;
+ dma_addr_t hw_cons_phys_addr;
+};
+
+/*
+ * Receive context. There is one such structure per instance of the
+ * receive processing. Any state information that is relevant to
+ * the receive processing must be in this structure. The global data may be
+ * present elsewhere.
+ */
+struct qlcnic_recv_context {
+ u32 state;
+ u16 context_id;
+ u16 virt_port;
+
+ struct qlcnic_host_rds_ring *rds_rings;
+ struct qlcnic_host_sds_ring *sds_rings;
+};
+
+/* HW context creation */
+
+#define QLCNIC_OS_CRB_RETRY_COUNT 4000
+#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \
+ (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
+
+#define QLCNIC_CDRP_CMD_BIT 0x80000000
+
+/*
+ * All responses must have the QLCNIC_CDRP_CMD_BIT cleared
+ * in the crb QLCNIC_CDRP_CRB_OFFSET.
+ */
+#define QLCNIC_CDRP_FORM_RSP(rsp) (rsp)
+#define QLCNIC_CDRP_IS_RSP(rsp) (((rsp) & QLCNIC_CDRP_CMD_BIT) == 0)
+
+#define QLCNIC_CDRP_RSP_OK 0x00000001
+#define QLCNIC_CDRP_RSP_FAIL 0x00000002
+#define QLCNIC_CDRP_RSP_TIMEOUT 0x00000003
+
+/*
+ * All commands must have the QLCNIC_CDRP_CMD_BIT set in
+ * the crb QLCNIC_CDRP_CRB_OFFSET.
+ */
+#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd))
+#define QLCNIC_CDRP_IS_CMD(cmd) (((cmd) & QLCNIC_CDRP_CMD_BIT) != 0)
+
+#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001
+#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002
+#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003
+#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004
+#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX 0x00000005
+#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX 0x00000006
+#define QLCNIC_CDRP_CMD_CREATE_RX_CTX 0x00000007
+#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008
+#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009
+#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
+#define QLCNIC_CDRP_CMD_SETUP_STATISTICS 0x0000000e
+#define QLCNIC_CDRP_CMD_GET_STATISTICS 0x0000000f
+#define QLCNIC_CDRP_CMD_DELETE_STATISTICS 0x00000010
+#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012
+#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013
+#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014
+#define QLCNIC_CDRP_CMD_READ_HW_REG 0x00000015
+#define QLCNIC_CDRP_CMD_GET_FLOW_CTL 0x00000016
+#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017
+#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018
+#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019
+#define QLCNIC_CDRP_CMD_CONFIGURE_TOE 0x0000001a
+#define QLCNIC_CDRP_CMD_FUNC_ATTRIB 0x0000001b
+#define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c
+#define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d
+#define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e
+#define QLCNIC_CDRP_CMD_MAX 0x0000001f
+
+#define QLCNIC_RCODE_SUCCESS 0
+#define QLCNIC_RCODE_TIMEOUT 17
+#define QLCNIC_DESTROY_CTX_RESET 0
+
+/*
+ * Capabilities Announced
+ */
+#define QLCNIC_CAP0_LEGACY_CONTEXT (1)
+#define QLCNIC_CAP0_LEGACY_MN (1 << 2)
+#define QLCNIC_CAP0_LSO (1 << 6)
+#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
+#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
+
+/*
+ * Context state
+ */
+#define QLCHAL_VERSION 1
+
+#define QLCNIC_HOST_CTX_STATE_ACTIVE 2
+
+/*
+ * Rx context
+ */
+
+struct qlcnic_hostrq_sds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le32 ring_size; /* Ring entries */
+ __le16 msi_index;
+ __le16 rsvd; /* Padding */
+};
+
+struct qlcnic_hostrq_rds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le64 buff_size; /* Packet buffer size */
+ __le32 ring_size; /* Ring entries */
+ __le32 ring_kind; /* Class of ring */
+};
+
+struct qlcnic_hostrq_rx_ctx {
+ __le64 host_rsp_dma_addr; /* Response dma'd here */
+ __le32 capabilities[4]; /* Flag bit vector */
+ __le32 host_int_crb_mode; /* Interrupt crb usage */
+ __le32 host_rds_crb_mode; /* RDS crb usage */
+ /* These ring offsets are relative to data[0] below */
+ __le32 rds_ring_offset; /* Offset to RDS config */
+ __le32 sds_ring_offset; /* Offset to SDS config */
+ __le16 num_rds_rings; /* Count of RDS rings */
+ __le16 num_sds_rings; /* Count of SDS rings */
+ __le16 rsvd1; /* Padding */
+ __le16 rsvd2; /* Padding */
+ u8 reserved[128]; /* reserve space for future expansion*/
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N hostrq_rds_rings
+ - N hostrq_sds_rings */
+ char data[0];
+};
+
+struct qlcnic_cardrsp_rds_ring{
+ __le32 host_producer_crb; /* Crb to use */
+ __le32 rsvd1; /* Padding */
+};
+
+struct qlcnic_cardrsp_sds_ring {
+ __le32 host_consumer_crb; /* Crb to use */
+ __le32 interrupt_crb; /* Crb to use */
+};
+
+struct qlcnic_cardrsp_rx_ctx {
+ /* These ring offsets are relative to data[0] below */
+ __le32 rds_ring_offset; /* Offset to RDS config */
+ __le32 sds_ring_offset; /* Offset to SDS config */
+ __le32 host_ctx_state; /* Starting State */
+ __le32 num_fn_per_port; /* How many PCI fn share the port */
+ __le16 num_rds_rings; /* Count of RDS rings */
+ __le16 num_sds_rings; /* Count of SDS rings */
+ __le16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ u8 reserved[128]; /* save space for future expansion */
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N cardrsp_rds_rings
+ - N cardrs_sds_rings */
+ char data[0];
+};
+
+#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
+ (sizeof(HOSTRQ_RX) + \
+ (rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) + \
+ (sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring)))
+
+#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \
+ (sizeof(CARDRSP_RX) + \
+ (rds_rings)*(sizeof(struct qlcnic_cardrsp_rds_ring)) + \
+ (sds_rings)*(sizeof(struct qlcnic_cardrsp_sds_ring)))
+
+/*
+ * Tx context
+ */
+
+struct qlcnic_hostrq_cds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le32 ring_size; /* Ring entries */
+ __le32 rsvd; /* Padding */
+};
+
+struct qlcnic_hostrq_tx_ctx {
+ __le64 host_rsp_dma_addr; /* Response dma'd here */
+ __le64 cmd_cons_dma_addr; /* */
+ __le64 dummy_dma_addr; /* */
+ __le32 capabilities[4]; /* Flag bit vector */
+ __le32 host_int_crb_mode; /* Interrupt crb usage */
+ __le32 rsvd1; /* Padding */
+ __le16 rsvd2; /* Padding */
+ __le16 interrupt_ctl;
+ __le16 msi_index;
+ __le16 rsvd3; /* Padding */
+ struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */
+ u8 reserved[128]; /* future expansion */
+};
+
+struct qlcnic_cardrsp_cds_ring {
+ __le32 host_producer_crb; /* Crb to use */
+ __le32 interrupt_crb; /* Crb to use */
+};
+
+struct qlcnic_cardrsp_tx_ctx {
+ __le32 host_ctx_state; /* Starting state */
+ __le16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */
+ u8 reserved[128]; /* future expansion */
+};
+
+#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
+#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
+
+/* CRB */
+
+#define QLCNIC_HOST_RDS_CRB_MODE_UNIQUE 0
+#define QLCNIC_HOST_RDS_CRB_MODE_SHARED 1
+#define QLCNIC_HOST_RDS_CRB_MODE_CUSTOM 2
+#define QLCNIC_HOST_RDS_CRB_MODE_MAX 3
+
+#define QLCNIC_HOST_INT_CRB_MODE_UNIQUE 0
+#define QLCNIC_HOST_INT_CRB_MODE_SHARED 1
+#define QLCNIC_HOST_INT_CRB_MODE_NORX 2
+#define QLCNIC_HOST_INT_CRB_MODE_NOTX 3
+#define QLCNIC_HOST_INT_CRB_MODE_NORXTX 4
+
+
+/* MAC */
+
+#define MC_COUNT_P3 38
+
+#define QLCNIC_MAC_NOOP 0
+#define QLCNIC_MAC_ADD 1
+#define QLCNIC_MAC_DEL 2
+
+struct qlcnic_mac_list_s {
+ struct list_head list;
+ uint8_t mac_addr[ETH_ALEN+2];
+};
+
+/*
+ * Interrupt coalescing defaults. The defaults are for a 1500 MTU and are
+ * adjusted based on the configured MTU.
+ */
+#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3
+#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256
+#define QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS 64
+#define QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US 4
+
+#define QLCNIC_INTR_DEFAULT 0x04
+
+union qlcnic_nic_intr_coalesce_data {
+ struct {
+ u16 rx_packets;
+ u16 rx_time_us;
+ u16 tx_packets;
+ u16 tx_time_us;
+ } data;
+ u64 word;
+};
+
+struct qlcnic_nic_intr_coalesce {
+ u16 stats_time_us;
+ u16 rate_sample_time;
+ u16 flags;
+ u16 rsvd_1;
+ u32 low_threshold;
+ u32 high_threshold;
+ union qlcnic_nic_intr_coalesce_data normal;
+ union qlcnic_nic_intr_coalesce_data low;
+ union qlcnic_nic_intr_coalesce_data high;
+ union qlcnic_nic_intr_coalesce_data irq;
+};
+
+#define QLCNIC_HOST_REQUEST 0x13
+#define QLCNIC_REQUEST 0x14
+
+#define QLCNIC_MAC_EVENT 0x1
+
+#define QLCNIC_IP_UP 2
+#define QLCNIC_IP_DOWN 3
+
+/*
+ * Driver --> Firmware
+ */
+#define QLCNIC_H2C_OPCODE_START 0
+#define QLCNIC_H2C_OPCODE_CONFIG_RSS 1
+#define QLCNIC_H2C_OPCODE_CONFIG_RSS_TBL 2
+#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3
+#define QLCNIC_H2C_OPCODE_CONFIG_LED 4
+#define QLCNIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5
+#define QLCNIC_H2C_OPCODE_CONFIG_L2_MAC 6
+#define QLCNIC_H2C_OPCODE_LRO_REQUEST 7
+#define QLCNIC_H2C_OPCODE_GET_SNMP_STATS 8
+#define QLCNIC_H2C_OPCODE_PROXY_START_REQUEST 9
+#define QLCNIC_H2C_OPCODE_PROXY_STOP_REQUEST 10
+#define QLCNIC_H2C_OPCODE_PROXY_SET_MTU 11
+#define QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12
+#define QLCNIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13
+#define QLCNIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14
+#define QLCNIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15
+#define QLCNIC_H2C_OPCODE_GET_NET_STATS 16
+#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
+#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 18
+#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 19
+#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE 20
+#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 21
+#define QLCNIC_C2C_OPCODE 22
+#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 23
+#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 24
+#define QLCNIC_H2C_OPCODE_LAST 25
+/*
+ * Firmware --> Driver
+ */
+
+#define QLCNIC_C2H_OPCODE_START 128
+#define QLCNIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129
+#define QLCNIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130
+#define QLCNIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131
+#define QLCNIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132
+#define QLCNIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133
+#define QLCNIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134
+#define QLCNIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135
+#define QLCNIC_C2H_OPCODE_GET_SNMP_STATS 136
+#define QLCNIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137
+#define QLCNIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138
+#define QLCNIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139
+#define QLCNIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140
+#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
+#define QLCNIC_C2H_OPCODE_LAST 142
+
+#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
+#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
+#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
+
+#define QLCNIC_LRO_REQUEST_CLEANUP 4
+
+/* Capabilities received */
+#define QLCNIC_FW_CAPABILITY_BDG (1 << 8)
+#define QLCNIC_FW_CAPABILITY_FVLANTX (1 << 9)
+#define QLCNIC_FW_CAPABILITY_HW_LRO (1 << 10)
+
+/* module types */
+#define LINKEVENT_MODULE_NOT_PRESENT 1
+#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
+#define LINKEVENT_MODULE_OPTICAL_SRLR 3
+#define LINKEVENT_MODULE_OPTICAL_LRM 4
+#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7
+#define LINKEVENT_MODULE_TWINAX 8
+
+#define LINKSPEED_10GBPS 10000
+#define LINKSPEED_1GBPS 1000
+#define LINKSPEED_100MBPS 100
+#define LINKSPEED_10MBPS 10
+
+#define LINKSPEED_ENCODED_10MBPS 0
+#define LINKSPEED_ENCODED_100MBPS 1
+#define LINKSPEED_ENCODED_1GBPS 2
+
+#define LINKEVENT_AUTONEG_DISABLED 0
+#define LINKEVENT_AUTONEG_ENABLED 1
+
+#define LINKEVENT_HALF_DUPLEX 0
+#define LINKEVENT_FULL_DUPLEX 1
+
+#define LINKEVENT_LINKSPEED_MBPS 0
+#define LINKEVENT_LINKSPEED_ENCODED 1
+
+#define AUTO_FW_RESET_ENABLED 0x01
+/* firmware response header:
+ * 63:58 - message type
+ * 57:56 - owner
+ * 55:53 - desc count
+ * 52:48 - reserved
+ * 47:40 - completion id
+ * 39:32 - opcode
+ * 31:16 - error code
+ * 15:00 - reserved
+ */
+#define qlcnic_get_nic_msg_opcode(msg_hdr) \
+ ((msg_hdr >> 32) & 0xFF)
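As a minimal sketch of the header layout, assuming a synthetic response carrying opcode 141 (QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE):

#include <assert.h>
#include <stdint.h>

#define qlcnic_get_nic_msg_opcode(msg_hdr)	(((msg_hdr) >> 32) & 0xFF)

int main(void)
{
	/* opcode sits in bits 39:32 of the response header */
	uint64_t hdr = (uint64_t)141 << 32;

	assert(qlcnic_get_nic_msg_opcode(hdr) == 141);
	return 0;
}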
+
+struct qlcnic_fw_msg {
+ union {
+ struct {
+ u64 hdr;
+ u64 body[7];
+ };
+ u64 words[8];
+ };
+};
+
+struct qlcnic_nic_req {
+ __le64 qhdr;
+ __le64 req_hdr;
+ __le64 words[6];
+};
+
+struct qlcnic_mac_req {
+ u8 op;
+ u8 tag;
+ u8 mac_addr[6];
+};
+
+#define QLCNIC_MSI_ENABLED 0x02
+#define QLCNIC_MSIX_ENABLED 0x04
+#define QLCNIC_LRO_ENABLED 0x08
+#define QLCNIC_BRIDGE_ENABLED 0X10
+#define QLCNIC_DIAG_ENABLED 0x20
+#define QLCNIC_IS_MSI_FAMILY(adapter) \
+ ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
+
+#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS
+#define QLCNIC_MSIX_TBL_SPACE 8192
+#define QLCNIC_PCI_REG_MSIX_TBL 0x44
+
+#define QLCNIC_NETDEV_WEIGHT 128
+#define QLCNIC_ADAPTER_UP_MAGIC 777
+
+#define __QLCNIC_FW_ATTACHED 0
+#define __QLCNIC_DEV_UP 1
+#define __QLCNIC_RESETTING 2
+#define __QLCNIC_START_FW 4
+
+#define QLCNIC_INTERRUPT_TEST 1
+#define QLCNIC_LOOPBACK_TEST 2
+
+struct qlcnic_adapter {
+ struct qlcnic_hardware_context ahw;
+
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct list_head mac_list;
+
+ spinlock_t tx_clean_lock;
+
+ u16 num_txd;
+ u16 num_rxd;
+ u16 num_jumbo_rxd;
+ u16 num_lro_rxd;
+
+ u8 max_rds_rings;
+ u8 max_sds_rings;
+ u8 driver_mismatch;
+ u8 msix_supported;
+ u8 rx_csum;
+ u8 pci_using_dac;
+ u8 portnum;
+ u8 physical_port;
+
+ u8 mc_enabled;
+ u8 max_mc_count;
+ u8 rss_supported;
+ u8 rsrvd1;
+ u8 fw_wait_cnt;
+ u8 fw_fail_cnt;
+ u8 tx_timeo_cnt;
+ u8 need_fw_reset;
+
+ u8 has_link_events;
+ u8 fw_type;
+ u16 tx_context_id;
+ u16 mtu;
+ u16 is_up;
+
+ u16 link_speed;
+ u16 link_duplex;
+ u16 link_autoneg;
+ u16 module_type;
+
+ u32 capabilities;
+ u32 flags;
+ u32 irq;
+ u32 temp;
+
+ u32 int_vec_bit;
+ u32 heartbit;
+
+ u8 dev_state;
+ u8 diag_test;
+ u8 diag_cnt;
+ u8 rsrd1;
+ u16 rsrd2;
+
+ u8 mac_addr[ETH_ALEN];
+
+ struct qlcnic_adapter_stats stats;
+
+ struct qlcnic_recv_context recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ void __iomem *tgt_mask_reg;
+ void __iomem *tgt_status_reg;
+ void __iomem *crb_int_state_reg;
+ void __iomem *isr_int_vec;
+
+ struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER];
+
+ struct delayed_work fw_work;
+
+ struct work_struct tx_timeout_task;
+
+ struct qlcnic_nic_intr_coalesce coal;
+
+ unsigned long state;
+ __le32 file_prd_off; /*File fw product offset*/
+ u32 fw_version;
+ const struct firmware *fw;
+};
+
+int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val);
+int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val);
+
+u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
+int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
+int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
+int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
+
+#define QLCRD32(adapter, off) \
+ (qlcnic_hw_read_wx_2M(adapter, off))
+#define QLCWR32(adapter, off, val) \
+ (qlcnic_hw_write_wx_2M(adapter, off, val))
+
+int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32);
+void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
+
+#define qlcnic_rom_lock(a) \
+ qlcnic_pcie_sem_lock((a), 2, QLCNIC_ROM_LOCK_ID)
+#define qlcnic_rom_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 2)
+#define qlcnic_phy_lock(a) \
+ qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID)
+#define qlcnic_phy_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 3)
+#define qlcnic_api_lock(a) \
+ qlcnic_pcie_sem_lock((a), 5, 0)
+#define qlcnic_api_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 5)
+#define qlcnic_sw_lock(a) \
+ qlcnic_pcie_sem_lock((a), 6, 0)
+#define qlcnic_sw_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 6)
+#define crb_win_lock(a) \
+ qlcnic_pcie_sem_lock((a), 7, QLCNIC_CRB_WIN_LOCK_ID)
+#define crb_win_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 7)
+
+int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
+int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
+int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
+
+/* Functions from qlcnic_init.c */
+int qlcnic_phantom_init(struct qlcnic_adapter *adapter);
+int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
+int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
+void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
+void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
+int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
+
+int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
+int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size);
+int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter);
+void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter);
+
+void __iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32);
+
+int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
+void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
+
+void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
+
+int qlcnic_init_firmware(struct qlcnic_adapter *adapter);
+void qlcnic_watchdog_task(struct work_struct *work);
+void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
+ struct qlcnic_host_rds_ring *rds_ring);
+int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
+void qlcnic_set_multi(struct net_device *netdev);
+void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
+int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
+int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter);
+int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable);
+int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd);
+int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable);
+void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
+
+int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
+int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
+int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable);
+int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
+void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring);
+int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac);
+void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
+int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
+
+/* Functions from qlcnic_main.c */
+int qlcnic_reset_context(struct qlcnic_adapter *);
+u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
+ u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd);
+void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
+int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
+int qlcnic_check_loopback_buff(unsigned char *data);
+netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
+
+/*
+ * QLOGIC Board information
+ */
+
+#define QLCNIC_MAX_BOARD_NAME_LEN 100
+struct qlcnic_brdinfo {
+ unsigned short vendor;
+ unsigned short device;
+ unsigned short sub_vendor;
+ unsigned short sub_device;
+ char short_name[QLCNIC_MAX_BOARD_NAME_LEN];
+};
+
+static const struct qlcnic_brdinfo qlcnic_boards[] = {
+ {0x1077, 0x8020, 0x1077, 0x203,
+ "8200 Series Single Port 10GbE Converged Network Adapter \
+ (TCP/IP Networking)"},
+ {0x1077, 0x8020, 0x1077, 0x207,
+ "8200 Series Dual Port 10GbE Converged Network Adapter \
+ (TCP/IP Networking)"},
+ {0x1077, 0x8020, 0x1077, 0x20b,
+ "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"},
+ {0x1077, 0x8020, 0x1077, 0x20c,
+ "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
+ {0x1077, 0x8020, 0x1077, 0x20f,
+ "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
+ {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
+};
+
+#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
+
+static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
+{
+ smp_mb();
+ if (tx_ring->producer < tx_ring->sw_consumer)
+ return tx_ring->sw_consumer - tx_ring->producer;
+ else
+ return tx_ring->sw_consumer + tx_ring->num_desc -
+ tx_ring->producer;
+}
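qlcnic_tx_avail() computes the number of free ring slots with wrap-around; the stand-alone re-implementation below, using invented producer/consumer values, walks through the same arithmetic:

#include <assert.h>
#include <stdint.h>

static uint32_t tx_avail(uint32_t producer, uint32_t sw_consumer, uint32_t num_desc)
{
	if (producer < sw_consumer)
		return sw_consumer - producer;
	return sw_consumer + num_desc - producer;
}

int main(void)
{
	assert(tx_avail(1000, 10, 1024) == 34);		/* producer wrapped past consumer */
	assert(tx_avail(10, 1000, 1024) == 990);	/* consumer ahead, no wrap */
	return 0;
}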
+
+extern const struct ethtool_ops qlcnic_ethtool_ops;
+
+#endif				/* _QLCNIC_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
new file mode 100644
index 000000000000..0a6a39914aec
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -0,0 +1,534 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include "qlcnic.h"
+
+static u32
+qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
+{
+ u32 rsp;
+ int timeout = 0;
+
+ do {
+		/* give at least 1 ms for the firmware to respond */
+ msleep(1);
+
+ if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
+ return QLCNIC_CDRP_RSP_TIMEOUT;
+
+ rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
+ } while (!QLCNIC_CDRP_IS_RSP(rsp));
+
+ return rsp;
+}
+
+u32
+qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
+ u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
+{
+ u32 rsp;
+ u32 signature;
+ u32 rcode = QLCNIC_RCODE_SUCCESS;
+ struct pci_dev *pdev = adapter->pdev;
+
+ signature = QLCNIC_CDRP_SIGNATURE_MAKE(pci_fn, version);
+
+ /* Acquire semaphore before accessing CRB */
+ if (qlcnic_api_lock(adapter))
+ return QLCNIC_RCODE_TIMEOUT;
+
+ QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
+ QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, arg1);
+ QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, arg2);
+ QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, arg3);
+ QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd));
+
+ rsp = qlcnic_poll_rsp(adapter);
+
+ if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
+ dev_err(&pdev->dev, "card response timeout.\n");
+ rcode = QLCNIC_RCODE_TIMEOUT;
+ } else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
+ rcode = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+ dev_err(&pdev->dev, "failed card response code:0x%x\n",
+ rcode);
+ }
+
+ /* Release semaphore */
+ qlcnic_api_unlock(adapter);
+
+ return rcode;
+}
+
+int
+qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
+{
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
+ if (qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ recv_ctx->context_id,
+ mtu,
+ 0,
+ QLCNIC_CDRP_CMD_SET_MTU)) {
+
+ dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int
+qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ void *addr;
+ struct qlcnic_hostrq_rx_ctx *prq;
+ struct qlcnic_cardrsp_rx_ctx *prsp;
+ struct qlcnic_hostrq_rds_ring *prq_rds;
+ struct qlcnic_hostrq_sds_ring *prq_sds;
+ struct qlcnic_cardrsp_rds_ring *prsp_rds;
+ struct qlcnic_cardrsp_sds_ring *prsp_sds;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+
+ dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+ u64 phys_addr;
+
+ int i, nrds_rings, nsds_rings;
+ size_t rq_size, rsp_size;
+ u32 cap, reg, val;
+ int err;
+
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ nrds_rings = adapter->max_rds_rings;
+ nsds_rings = adapter->max_sds_rings;
+
+ rq_size =
+ SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
+ nsds_rings);
+ rsp_size =
+ SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
+ nsds_rings);
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ rq_size, &hostrq_phys_addr);
+ if (addr == NULL)
+ return -ENOMEM;
+ prq = (struct qlcnic_hostrq_rx_ctx *)addr;
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ rsp_size, &cardrsp_phys_addr);
+ if (addr == NULL) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+ prsp = (struct qlcnic_cardrsp_rx_ctx *)addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
+
+ cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN);
+ cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
+
+ prq->capabilities[0] = cpu_to_le32(cap);
+ prq->host_int_crb_mode =
+ cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
+ prq->host_rds_crb_mode =
+ cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
+
+ prq->num_rds_rings = cpu_to_le16(nrds_rings);
+ prq->num_sds_rings = cpu_to_le16(nsds_rings);
+ prq->rds_ring_offset = cpu_to_le32(0);
+
+ val = le32_to_cpu(prq->rds_ring_offset) +
+ (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
+ prq->sds_ring_offset = cpu_to_le32(val);
+
+ prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
+ le32_to_cpu(prq->rds_ring_offset));
+
+ for (i = 0; i < nrds_rings; i++) {
+
+ rds_ring = &recv_ctx->rds_rings[i];
+
+ prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
+ prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
+ prq_rds[i].ring_kind = cpu_to_le32(i);
+ prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
+ }
+
+ prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
+ le32_to_cpu(prq->sds_ring_offset));
+
+ for (i = 0; i < nsds_rings; i++) {
+
+ sds_ring = &recv_ctx->sds_rings[i];
+
+ prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
+ prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
+ prq_sds[i].msi_index = cpu_to_le16(i);
+ }
+
+ phys_addr = hostrq_phys_addr;
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ (u32)(phys_addr >> 32),
+ (u32)(phys_addr & 0xffffffff),
+ rq_size,
+ QLCNIC_CDRP_CMD_CREATE_RX_CTX);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+			"Failed to create rx ctx in firmware %d\n", err);
+ goto out_free_rsp;
+ }
+
+
+ prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
+ &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
+
+ for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
+ rds_ring = &recv_ctx->rds_rings[i];
+
+ reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
+ rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter,
+ QLCNIC_REG(reg - 0x200));
+ }
+
+ prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
+ &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
+
+ for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
+ sds_ring = &recv_ctx->sds_rings[i];
+
+ reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
+ sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter,
+ QLCNIC_REG(reg - 0x200));
+
+ reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
+ sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter,
+ QLCNIC_REG(reg - 0x200));
+ }
+
+ recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
+ recv_ctx->context_id = le16_to_cpu(prsp->context_id);
+ recv_ctx->virt_port = prsp->virt_port;
+
+out_free_rsp:
+ pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
+out_free_rq:
+ pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
+ return err;
+}
+
+static void
+qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ recv_ctx->context_id,
+ QLCNIC_DESTROY_CTX_RESET,
+ 0,
+ QLCNIC_CDRP_CMD_DESTROY_RX_CTX)) {
+
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy rx ctx in firmware\n");
+ }
+}
+
+static int
+qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hostrq_tx_ctx *prq;
+ struct qlcnic_hostrq_cds_ring *prq_cds;
+ struct qlcnic_cardrsp_tx_ctx *prsp;
+ void *rq_addr, *rsp_addr;
+ size_t rq_size, rsp_size;
+ u32 temp;
+ int err;
+ u64 phys_addr;
+ dma_addr_t rq_phys_addr, rsp_phys_addr;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
+ rq_addr = pci_alloc_consistent(adapter->pdev,
+ rq_size, &rq_phys_addr);
+ if (!rq_addr)
+ return -ENOMEM;
+
+ rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
+ rsp_addr = pci_alloc_consistent(adapter->pdev,
+ rsp_size, &rsp_phys_addr);
+ if (!rsp_addr) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+
+ memset(rq_addr, 0, rq_size);
+ prq = (struct qlcnic_hostrq_tx_ctx *)rq_addr;
+
+ memset(rsp_addr, 0, rsp_size);
+ prsp = (struct qlcnic_cardrsp_tx_ctx *)rsp_addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
+
+ temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
+ QLCNIC_CAP0_LSO);
+ prq->capabilities[0] = cpu_to_le32(temp);
+
+ prq->host_int_crb_mode =
+ cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
+
+ prq->interrupt_ctl = 0;
+ prq->msi_index = 0;
+ prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
+
+ prq_cds = &prq->cds_ring;
+
+ prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
+ prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
+
+ phys_addr = rq_phys_addr;
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ (u32)(phys_addr >> 32),
+ ((u32)phys_addr & 0xffffffff),
+ rq_size,
+ QLCNIC_CDRP_CMD_CREATE_TX_CTX);
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
+ tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter,
+ QLCNIC_REG(temp - 0x200));
+
+ adapter->tx_context_id =
+ le16_to_cpu(prsp->context_id);
+ } else {
+ dev_err(&adapter->pdev->dev,
+			"Failed to create tx ctx in firmware %d\n", err);
+ err = -EIO;
+ }
+
+ pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
+
+out_free_rq:
+ pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
+
+ return err;
+}
+
+static void
+qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ adapter->tx_context_id,
+ QLCNIC_DESTROY_CTX_RESET,
+ 0,
+ QLCNIC_CDRP_CMD_DESTROY_TX_CTX)) {
+
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy tx ctx in firmware\n");
+ }
+}
+
+int
+qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val)
+{
+
+ if (qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ reg,
+ 0,
+ 0,
+ QLCNIC_CDRP_CMD_READ_PHY)) {
+
+ return -EIO;
+ }
+
+ return QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+}
+
+int
+qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val)
+{
+ return qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ reg,
+ val,
+ 0,
+ QLCNIC_CDRP_CMD_WRITE_PHY);
+}
+
+int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
+{
+ void *addr;
+ int err;
+ int ring;
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ struct pci_dev *pdev = adapter->pdev;
+
+ recv_ctx = &adapter->recv_ctx;
+ tx_ring = adapter->tx_ring;
+
+ tx_ring->hw_consumer = (__le32 *)pci_alloc_consistent(pdev, sizeof(u32),
+ &tx_ring->hw_cons_phys_addr);
+ if (tx_ring->hw_consumer == NULL) {
+ dev_err(&pdev->dev, "failed to allocate tx consumer\n");
+ return -ENOMEM;
+ }
+ *(tx_ring->hw_consumer) = 0;
+
+ /* cmd desc ring */
+ addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
+ &tx_ring->phys_addr);
+
+ if (addr == NULL) {
+ dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
+ return -ENOMEM;
+ }
+
+ tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ addr = pci_alloc_consistent(adapter->pdev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ &rds_ring->phys_addr);
+ if (addr == NULL) {
+ dev_err(&pdev->dev,
+ "failed to allocate rds ring [%d]\n", ring);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ rds_ring->desc_head = (struct rcv_desc *)addr;
+
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ &sds_ring->phys_addr);
+ if (addr == NULL) {
+ dev_err(&pdev->dev,
+ "failed to allocate sds ring [%d]\n", ring);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ sds_ring->desc_head = (struct status_desc *)addr;
+ }
+
+
+ err = qlcnic_fw_cmd_create_rx_ctx(adapter);
+ if (err)
+ goto err_out_free;
+ err = qlcnic_fw_cmd_create_tx_ctx(adapter);
+ if (err)
+ goto err_out_free;
+
+ set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+ return 0;
+
+err_out_free:
+ qlcnic_free_hw_resources(adapter);
+ return err;
+}
+
+void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+
+ if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
+ qlcnic_fw_cmd_destroy_rx_ctx(adapter);
+ qlcnic_fw_cmd_destroy_tx_ctx(adapter);
+
+ /* Allow dma queues to drain after context reset */
+ msleep(20);
+ }
+
+ recv_ctx = &adapter->recv_ctx;
+
+ tx_ring = adapter->tx_ring;
+ if (tx_ring->hw_consumer != NULL) {
+ pci_free_consistent(adapter->pdev,
+ sizeof(u32),
+ tx_ring->hw_consumer,
+ tx_ring->hw_cons_phys_addr);
+ tx_ring->hw_consumer = NULL;
+ }
+
+ if (tx_ring->desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ TX_DESC_RINGSIZE(tx_ring),
+ tx_ring->desc_head, tx_ring->phys_addr);
+ tx_ring->desc_head = NULL;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ if (rds_ring->desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ rds_ring->desc_head,
+ rds_ring->phys_addr);
+ rds_ring->desc_head = NULL;
+ }
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ if (sds_ring->desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ sds_ring->desc_head,
+ sds_ring->phys_addr);
+ sds_ring->desc_head = NULL;
+ }
+ }
+}
+
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
new file mode 100644
index 000000000000..8da6ec8c13b9
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -0,0 +1,1015 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "qlcnic.h"
+
+struct qlcnic_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
+#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
+
+static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
+ {"xmit_called",
+ QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)},
+ {"xmit_finished",
+ QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)},
+ {"rx_dropped",
+ QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+ {"tx_dropped",
+ QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
+ {"csummed",
+ QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+ {"rx_pkts",
+ QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
+ {"lro_pkts",
+ QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
+ {"rx_bytes",
+ QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
+ {"tx_bytes",
+ QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
+};
+
+#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
+
+static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register_Test_on_offline",
+ "Link_Test_on_offline",
+ "Interrupt_Test_offline",
+ "Loopback_Test_offline"
+};
+
+#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
+
+#define QLCNIC_RING_REGS_COUNT 20
+#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32))
+#define QLCNIC_MAX_EEPROM_LEN 1024
+
+static const u32 diag_registers[] = {
+ CRB_CMDPEG_STATE,
+ CRB_RCVPEG_STATE,
+ CRB_XG_STATE_P3,
+ CRB_FW_CAPABILITIES_1,
+ ISR_INT_STATE_REG,
+ QLCNIC_CRB_DEV_REF_COUNT,
+ QLCNIC_CRB_DEV_STATE,
+ QLCNIC_CRB_DRV_STATE,
+ QLCNIC_CRB_DRV_SCRATCH,
+ QLCNIC_CRB_DEV_PARTITION_INFO,
+ QLCNIC_CRB_DRV_IDC_VER,
+ QLCNIC_PEG_ALIVE_COUNTER,
+ QLCNIC_PEG_HALT_STATUS1,
+ QLCNIC_PEG_HALT_STATUS2,
+ QLCNIC_CRB_PEG_NET_0+0x3c,
+ QLCNIC_CRB_PEG_NET_1+0x3c,
+ QLCNIC_CRB_PEG_NET_2+0x3c,
+ QLCNIC_CRB_PEG_NET_4+0x3c,
+ -1
+};
+
+static int qlcnic_get_regs_len(struct net_device *dev)
+{
+ return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN;
+}
+
+static int qlcnic_get_eeprom_len(struct net_device *dev)
+{
+ return QLCNIC_FLASH_TOTAL_SIZE;
+}
+
+static void
+qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 fw_major, fw_minor, fw_build;
+
+ fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+ sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, qlcnic_driver_name, 32);
+ strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32);
+}
+
+static int
+qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int check_sfp_module = 0;
+ u16 pcifn = adapter->ahw.pci_func;
+
+ /* read which mode */
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ ecmd->advertising = (ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full);
+
+ ecmd->speed = adapter->link_speed;
+ ecmd->duplex = adapter->link_duplex;
+ ecmd->autoneg = adapter->link_autoneg;
+
+ } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ u32 val;
+
+ val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
+ if (val == QLCNIC_PORT_MODE_802_3_AP) {
+ ecmd->supported = SUPPORTED_1000baseT_Full;
+ ecmd->advertising = ADVERTISED_1000baseT_Full;
+ } else {
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ }
+
+ if (netif_running(dev) && adapter->has_link_events) {
+ ecmd->speed = adapter->link_speed;
+ ecmd->autoneg = adapter->link_autoneg;
+ ecmd->duplex = adapter->link_duplex;
+ goto skip;
+ }
+
+ val = QLCRD32(adapter, P3_LINK_SPEED_REG(pcifn));
+ ecmd->speed = P3_LINK_SPEED_MHZ *
+ P3_LINK_SPEED_VAL(pcifn, val);
+ ecmd->duplex = DUPLEX_FULL;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ } else
+ return -EIO;
+
+skip:
+ ecmd->phy_address = adapter->physical_port;
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ switch (adapter->ahw.board_type) {
+ case QLCNIC_BRDTYPE_P3_REF_QG:
+ case QLCNIC_BRDTYPE_P3_4_GB:
+ case QLCNIC_BRDTYPE_P3_4_GB_MM:
+
+ ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ case QLCNIC_BRDTYPE_P3_10G_CX4:
+ case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
+ case QLCNIC_BRDTYPE_P3_10000_BASE_T:
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->port = PORT_TP;
+ ecmd->autoneg = adapter->link_autoneg;
+ break;
+ case QLCNIC_BRDTYPE_P3_IMEZ:
+ case QLCNIC_BRDTYPE_P3_XG_LOM:
+ case QLCNIC_BRDTYPE_P3_HMEZ:
+ ecmd->supported |= SUPPORTED_MII;
+ ecmd->advertising |= ADVERTISED_MII;
+ ecmd->port = PORT_MII;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ break;
+ case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
+ case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
+ case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->supported |= SUPPORTED_TP;
+ check_sfp_module = netif_running(dev) &&
+			adapter->has_link_events;
+		/* fall through */
+	case QLCNIC_BRDTYPE_P3_10G_XFP:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ break;
+ case QLCNIC_BRDTYPE_P3_10G_TP:
+ if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
+ ecmd->advertising |=
+ (ADVERTISED_FIBRE | ADVERTISED_TP);
+ ecmd->port = PORT_FIBRE;
+ check_sfp_module = netif_running(dev) &&
+ adapter->has_link_events;
+ } else {
+ ecmd->autoneg = AUTONEG_ENABLE;
+ ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+ ecmd->advertising |=
+ (ADVERTISED_TP | ADVERTISED_Autoneg);
+ ecmd->port = PORT_TP;
+ }
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
+ adapter->ahw.board_type);
+ return -EIO;
+ }
+
+ if (check_sfp_module) {
+ switch (adapter->module_type) {
+ case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
+ case LINKEVENT_MODULE_OPTICAL_SRLR:
+ case LINKEVENT_MODULE_OPTICAL_LRM:
+ case LINKEVENT_MODULE_OPTICAL_SFP_1G:
+ ecmd->port = PORT_FIBRE;
+ break;
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
+ case LINKEVENT_MODULE_TWINAX:
+ ecmd->port = PORT_TP;
+ break;
+ default:
+ ecmd->port = PORT_OTHER;
+ }
+ }
+
+ return 0;
+}
+
+static int
+qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ __u32 status;
+
+ /* read which mode */
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ /* autonegotiation */
+ if (qlcnic_fw_cmd_set_phy(adapter,
+ QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG,
+ ecmd->autoneg) != 0)
+ return -EIO;
+ else
+ adapter->link_autoneg = ecmd->autoneg;
+
+ if (qlcnic_fw_cmd_query_phy(adapter,
+ QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+ &status) != 0)
+ return -EIO;
+
+ switch (ecmd->speed) {
+ case SPEED_10:
+ qlcnic_set_phy_speed(status, 0);
+ break;
+ case SPEED_100:
+ qlcnic_set_phy_speed(status, 1);
+ break;
+ case SPEED_1000:
+ qlcnic_set_phy_speed(status, 2);
+ break;
+ }
+
+ if (ecmd->duplex == DUPLEX_HALF)
+ qlcnic_clear_phy_duplex(status);
+ if (ecmd->duplex == DUPLEX_FULL)
+ qlcnic_set_phy_duplex(status);
+ if (qlcnic_fw_cmd_set_phy(adapter,
+ QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+ *((int *)&status)) != 0)
+ return -EIO;
+ else {
+ adapter->link_speed = ecmd->speed;
+ adapter->link_duplex = ecmd->duplex;
+ }
+ } else
+ return -EOPNOTSUPP;
+
+ if (!netif_running(dev))
+ return 0;
+
+ dev->netdev_ops->ndo_stop(dev);
+ return dev->netdev_ops->ndo_open(dev);
+}
+
+static void
+qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_host_sds_ring *sds_ring;
+ u32 *regs_buff = p;
+ int ring, i = 0;
+
+ memset(p, 0, qlcnic_get_regs_len(dev));
+ regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
+ (adapter->pdev)->device;
+
+ for (i = 0; diag_registers[i] != -1; i++)
+ regs_buff[i] = QLCRD32(adapter, diag_registers[i]);
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+	regs_buff[i++] = 0xFFEFCDAB; /* Marker between regs and ring count */
+
+ regs_buff[i++] = 1; /* No. of tx ring */
+ regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
+ regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer);
+
+ regs_buff[i++] = 2; /* No. of rx ring */
+ regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer);
+ regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer);
+
+ regs_buff[i++] = adapter->max_sds_rings;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &(recv_ctx->sds_rings[ring]);
+ regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
+ }
+}
+
+static u32 qlcnic_test_link(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 val;
+
+ val = QLCRD32(adapter, CRB_XG_STATE_P3);
+ val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
+ return (val == XG_LINK_UP_P3) ? 0 : 1;
+}
+
+static int
+qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int offset;
+ int ret;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = (adapter->pdev)->vendor |
+ ((adapter->pdev)->device << 16);
+ offset = eeprom->offset;
+
+ ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
+ eeprom->len);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void
+qlcnic_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ ring->rx_pending = adapter->num_rxd;
+ ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
+ ring->rx_jumbo_pending += adapter->num_lro_rxd;
+ ring->tx_pending = adapter->num_txd;
+
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
+ ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ } else {
+ ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
+ ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ }
+
+ ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
+
+ ring->rx_mini_max_pending = 0;
+ ring->rx_mini_pending = 0;
+}
+
+static u32
+qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
+{
+ u32 num_desc;
+ num_desc = max(val, min);
+ num_desc = min(num_desc, max);
+ num_desc = roundup_pow_of_two(num_desc);
+
+ if (val != num_desc) {
+ printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n",
+ qlcnic_driver_name, r_name, num_desc, val);
+ }
+
+ return num_desc;
+}
+
+static int
+qlcnic_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
+ u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ u16 num_rxd, num_jumbo_rxd, num_txd;
+
+ if (ring->rx_mini_pending)
+ return -EOPNOTSUPP;
+
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
+		max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ }
+
+ num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
+ MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
+
+ num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
+ MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
+
+ num_txd = qlcnic_validate_ringparam(ring->tx_pending,
+ MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
+
+ if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd &&
+ num_jumbo_rxd == adapter->num_jumbo_rxd)
+ return 0;
+
+ adapter->num_rxd = num_rxd;
+ adapter->num_jumbo_rxd = num_jumbo_rxd;
+ adapter->num_txd = num_txd;
+
+ return qlcnic_reset_context(adapter);
+}
+
+static void
+qlcnic_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int port = adapter->physical_port;
+ __u32 val;
+
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+		if ((port < 0) || (port >= QLCNIC_NIU_MAX_GBE_PORTS))
+ return;
+ /* get flow control settings */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+ pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+ switch (port) {
+ case 0:
+ pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
+ break;
+ case 1:
+ pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val));
+ break;
+ case 2:
+ pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val));
+ break;
+ case 3:
+ default:
+ pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val));
+ break;
+ }
+ } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
+		if ((port < 0) || (port >= QLCNIC_NIU_MAX_XG_PORTS))
+ return;
+ pause->rx_pause = 1;
+ val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+ if (port == 0)
+ pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
+ else
+ pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val));
+ } else {
+ dev_err(&netdev->dev, "Unknown board type: %x\n",
+ adapter->ahw.port_type);
+ }
+}
+
+static int
+qlcnic_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int port = adapter->physical_port;
+ __u32 val;
+
+ /* read mode */
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+		if ((port < 0) || (port >= QLCNIC_NIU_MAX_GBE_PORTS))
+ return -EIO;
+ /* set flow control */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+
+ if (pause->rx_pause)
+ qlcnic_gb_rx_flowctl(val);
+ else
+ qlcnic_gb_unset_rx_flowctl(val);
+
+ QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port),
+ val);
+ /* set autoneg */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+ switch (port) {
+ case 0:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb0_mask(val);
+ else
+ qlcnic_gb_set_gb0_mask(val);
+ break;
+ case 1:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb1_mask(val);
+ else
+ qlcnic_gb_set_gb1_mask(val);
+ break;
+ case 2:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb2_mask(val);
+ else
+ qlcnic_gb_set_gb2_mask(val);
+ break;
+ case 3:
+ default:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb3_mask(val);
+ else
+ qlcnic_gb_set_gb3_mask(val);
+ break;
+ }
+ QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
+ } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
+		if ((port < 0) || (port >= QLCNIC_NIU_MAX_XG_PORTS))
+ return -EIO;
+ val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+ if (port == 0) {
+ if (pause->tx_pause)
+ qlcnic_xg_unset_xg0_mask(val);
+ else
+ qlcnic_xg_set_xg0_mask(val);
+ } else {
+ if (pause->tx_pause)
+ qlcnic_xg_unset_xg1_mask(val);
+ else
+ qlcnic_xg_set_xg1_mask(val);
+ }
+ QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val);
+ } else {
+ dev_err(&netdev->dev, "Unknown board type: %x\n",
+ adapter->ahw.port_type);
+ }
+ return 0;
+}
+
+static int qlcnic_reg_test(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 data_read, data_written;
+
+ data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
+ if ((data_read & 0xffff) != adapter->pdev->vendor)
+ return 1;
+
+ data_written = (u32)0xa5a5a5a5;
+
+ QLCWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
+ data_read = QLCRD32(adapter, CRB_SCRATCHPAD_TEST);
+ if (data_written != data_read)
+ return 1;
+
+ return 0;
+}
+
+static int qlcnic_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return QLCNIC_TEST_LEN;
+ case ETH_SS_STATS:
+ return QLCNIC_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+#define QLC_ILB_PKT_SIZE 64
+
+static void qlcnic_create_loopback_buff(unsigned char *data)
+{
+ unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
+	memset(data, 0x4e, QLC_ILB_PKT_SIZE);	/* payload filler */
+	memset(data, 0xff, 12);			/* dst + src MAC set to all-ones */
+	memcpy(data + 12, random_data, sizeof(random_data));
+}
+
+int qlcnic_check_loopback_buff(unsigned char *data)
+{
+ unsigned char buff[QLC_ILB_PKT_SIZE];
+ qlcnic_create_loopback_buff(buff);
+ return memcmp(data, buff, QLC_ILB_PKT_SIZE);
+}
+
+static int qlcnic_do_ilb_test(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+		skb = dev_alloc_skb(QLC_ILB_PKT_SIZE);
+		if (!skb)
+			return -ENOMEM;
+		qlcnic_create_loopback_buff(skb->data);
+ skb_put(skb, QLC_ILB_PKT_SIZE);
+
+ adapter->diag_cnt = 0;
+
+ qlcnic_xmit_frame(skb, adapter->netdev);
+
+ msleep(5);
+
+ qlcnic_process_rcv_ring_diag(sds_ring);
+
+ dev_kfree_skb_any(skb);
+ if (!adapter->diag_cnt)
+ return -1;
+ }
+ return 0;
+}
+
+static int qlcnic_loopback_test(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int max_sds_rings = adapter->max_sds_rings;
+ int ret;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
+ if (ret)
+ goto clear_it;
+
+ ret = qlcnic_set_ilb_mode(adapter);
+ if (ret)
+ goto done;
+
+ ret = qlcnic_do_ilb_test(adapter);
+
+ qlcnic_clear_ilb_mode(adapter);
+
+done:
+ qlcnic_diag_free_res(netdev, max_sds_rings);
+
+clear_it:
+ adapter->max_sds_rings = max_sds_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+static int qlcnic_irq_test(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int max_sds_rings = adapter->max_sds_rings;
+ int ret;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
+ if (ret)
+ goto clear_it;
+
+ adapter->diag_cnt = 0;
+ ret = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func,
+ QLCHAL_VERSION, adapter->portnum, 0, 0, 0x00000011);
+ if (ret)
+ goto done;
+
+ msleep(10);
+
+ ret = !adapter->diag_cnt;
+
+done:
+ qlcnic_diag_free_res(netdev, max_sds_rings);
+
+clear_it:
+ adapter->max_sds_rings = max_sds_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+static void
+qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
+ u64 *data)
+{
+ memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN);
+
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ data[2] = qlcnic_irq_test(dev);
+ if (data[2])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ data[3] = qlcnic_loopback_test(dev);
+ if (data[3])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ }
+
+ data[0] = qlcnic_reg_test(dev);
+ if (data[0])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* link test */
+ data[1] = (u64) qlcnic_test_link(dev);
+ if (data[1])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+}
+
+static void
+qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+ int index;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *qlcnic_gstrings_test,
+ QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (index = 0; index < QLCNIC_STATS_LEN; index++) {
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_gstrings_stats[index].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ }
+}
+
+static void
+qlcnic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 * data)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int index;
+
+ for (index = 0; index < QLCNIC_STATS_LEN; index++) {
+ char *p =
+ (char *)adapter +
+ qlcnic_gstrings_stats[index].stat_offset;
+ data[index] =
+ (qlcnic_gstrings_stats[index].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p:(*(u32 *)p);
+ }
+}
+
+static u32 qlcnic_get_rx_csum(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ return adapter->rx_csum;
+}
+
+static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ adapter->rx_csum = !!data;
+ return 0;
+}
+
+static u32 qlcnic_get_tso(struct net_device *dev)
+{
+ return (dev->features & (NETIF_F_TSO | NETIF_F_TSO6)) != 0;
+}
+
+static int qlcnic_set_tso(struct net_device *dev, u32 data)
+{
+ if (data)
+ dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ else
+ dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+ return 0;
+}
+
+static int qlcnic_blink_led(struct net_device *dev, u32 val)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int ret;
+
+ ret = qlcnic_config_led(adapter, 1, 0xf);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to set LED blink state.\n");
+ return ret;
+ }
+
+ msleep_interruptible(val * 1000);
+
+ ret = qlcnic_config_led(adapter, 0, 0xf);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to reset LED blink state.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 wol_cfg;
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ if (wol_cfg & (1UL << adapter->portnum))
+ wol->supported |= WAKE_MAGIC;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ if (wol_cfg & (1UL << adapter->portnum))
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int
+qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 wol_cfg;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EOPNOTSUPP;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ if (!(wol_cfg & (1 << adapter->portnum)))
+ return -EOPNOTSUPP;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ if (wol->wolopts & WAKE_MAGIC)
+ wol_cfg |= 1UL << adapter->portnum;
+ else
+ wol_cfg &= ~(1UL << adapter->portnum);
+
+ QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg);
+
+ return 0;
+}
+
+/*
+ * Set the coalescing parameters. Currently only normal is supported.
+ * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the
+ * firmware coalescing to default.
+ */
+static int qlcnic_set_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EINVAL;
+
+ /*
+ * Return Error if unsupported values or
+ * unsupported parameters are set.
+ */
+ if (ethcoal->rx_coalesce_usecs > 0xffff ||
+ ethcoal->rx_max_coalesced_frames > 0xffff ||
+ ethcoal->tx_coalesce_usecs > 0xffff ||
+ ethcoal->tx_max_coalesced_frames > 0xffff ||
+ ethcoal->rx_coalesce_usecs_irq ||
+ ethcoal->rx_max_coalesced_frames_irq ||
+ ethcoal->tx_coalesce_usecs_irq ||
+ ethcoal->tx_max_coalesced_frames_irq ||
+ ethcoal->stats_block_coalesce_usecs ||
+ ethcoal->use_adaptive_rx_coalesce ||
+ ethcoal->use_adaptive_tx_coalesce ||
+ ethcoal->pkt_rate_low ||
+ ethcoal->rx_coalesce_usecs_low ||
+ ethcoal->rx_max_coalesced_frames_low ||
+ ethcoal->tx_coalesce_usecs_low ||
+ ethcoal->tx_max_coalesced_frames_low ||
+ ethcoal->pkt_rate_high ||
+ ethcoal->rx_coalesce_usecs_high ||
+ ethcoal->rx_max_coalesced_frames_high ||
+ ethcoal->tx_coalesce_usecs_high ||
+ ethcoal->tx_max_coalesced_frames_high)
+ return -EINVAL;
+
+ if (!ethcoal->rx_coalesce_usecs ||
+ !ethcoal->rx_max_coalesced_frames) {
+ adapter->coal.flags = QLCNIC_INTR_DEFAULT;
+ adapter->coal.normal.data.rx_time_us =
+ QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
+ adapter->coal.normal.data.rx_packets =
+ QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ } else {
+ adapter->coal.flags = 0;
+ adapter->coal.normal.data.rx_time_us =
+ ethcoal->rx_coalesce_usecs;
+ adapter->coal.normal.data.rx_packets =
+ ethcoal->rx_max_coalesced_frames;
+ }
+ adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs;
+ adapter->coal.normal.data.tx_packets =
+ ethcoal->tx_max_coalesced_frames;
+
+ qlcnic_config_intr_coalesce(adapter);
+
+ return 0;
+}
+
+static int qlcnic_get_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EINVAL;
+
+ ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us;
+ ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us;
+ ethcoal->rx_max_coalesced_frames =
+ adapter->coal.normal.data.rx_packets;
+ ethcoal->tx_max_coalesced_frames =
+ adapter->coal.normal.data.tx_packets;
+
+ return 0;
+}
+
+static int qlcnic_set_flags(struct net_device *netdev, u32 data)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int hw_lro;
+
+ if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
+ return -EINVAL;
+
+ ethtool_op_set_flags(netdev, data);
+
+ hw_lro = (data & ETH_FLAG_LRO) ? QLCNIC_LRO_ENABLED : 0;
+
+ if (qlcnic_config_hw_lro(adapter, hw_lro))
+ return -EIO;
+
+ if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
+ return -EIO;
+
+ return 0;
+}
+
+const struct ethtool_ops qlcnic_ethtool_ops = {
+ .get_settings = qlcnic_get_settings,
+ .set_settings = qlcnic_set_settings,
+ .get_drvinfo = qlcnic_get_drvinfo,
+ .get_regs_len = qlcnic_get_regs_len,
+ .get_regs = qlcnic_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = qlcnic_get_eeprom_len,
+ .get_eeprom = qlcnic_get_eeprom,
+ .get_ringparam = qlcnic_get_ringparam,
+ .set_ringparam = qlcnic_set_ringparam,
+ .get_pauseparam = qlcnic_get_pauseparam,
+ .set_pauseparam = qlcnic_set_pauseparam,
+ .set_tx_csum = ethtool_op_set_tx_csum,
+ .set_sg = ethtool_op_set_sg,
+ .get_tso = qlcnic_get_tso,
+ .set_tso = qlcnic_set_tso,
+ .get_wol = qlcnic_get_wol,
+ .set_wol = qlcnic_set_wol,
+ .self_test = qlcnic_diag_test,
+ .get_strings = qlcnic_get_strings,
+ .get_ethtool_stats = qlcnic_get_ethtool_stats,
+ .get_sset_count = qlcnic_get_sset_count,
+ .get_rx_csum = qlcnic_get_rx_csum,
+ .set_rx_csum = qlcnic_set_rx_csum,
+ .get_coalesce = qlcnic_get_intr_coalesce,
+ .set_coalesce = qlcnic_set_intr_coalesce,
+ .get_flags = ethtool_op_get_flags,
+ .set_flags = qlcnic_set_flags,
+ .phys_id = qlcnic_blink_led,
+};
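Aside (not part of the patch): an ops table like the one above only takes effect once it is attached to the net_device during probe. A minimal sketch under that assumption, using the SET_ETHTOOL_OPS() helper available in this kernel era; the surrounding probe code is not shown here:

	/* in the PCI probe path, before register_netdev() */
	SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);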
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
new file mode 100644
index 000000000000..0469f84360a4
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -0,0 +1,937 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#ifndef __QLCNIC_HDR_H_
+#define __QLCNIC_HDR_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/*
+ * The basic unit of access when reading/writing control registers.
+ */
+
+enum {
+ QLCNIC_HW_H0_CH_HUB_ADR = 0x05,
+ QLCNIC_HW_H1_CH_HUB_ADR = 0x0E,
+ QLCNIC_HW_H2_CH_HUB_ADR = 0x03,
+ QLCNIC_HW_H3_CH_HUB_ADR = 0x01,
+ QLCNIC_HW_H4_CH_HUB_ADR = 0x06,
+ QLCNIC_HW_H5_CH_HUB_ADR = 0x07,
+ QLCNIC_HW_H6_CH_HUB_ADR = 0x08
+};
+
+/* Hub 0 */
+enum {
+ QLCNIC_HW_MN_CRB_AGT_ADR = 0x15,
+ QLCNIC_HW_MS_CRB_AGT_ADR = 0x25
+};
+
+/* Hub 1 */
+enum {
+ QLCNIC_HW_PS_CRB_AGT_ADR = 0x73,
+ QLCNIC_HW_SS_CRB_AGT_ADR = 0x20,
+ QLCNIC_HW_RPMX3_CRB_AGT_ADR = 0x0b,
+ QLCNIC_HW_QMS_CRB_AGT_ADR = 0x00,
+ QLCNIC_HW_SQGS0_CRB_AGT_ADR = 0x01,
+ QLCNIC_HW_SQGS1_CRB_AGT_ADR = 0x02,
+ QLCNIC_HW_SQGS2_CRB_AGT_ADR = 0x03,
+ QLCNIC_HW_SQGS3_CRB_AGT_ADR = 0x04,
+ QLCNIC_HW_C2C0_CRB_AGT_ADR = 0x58,
+ QLCNIC_HW_C2C1_CRB_AGT_ADR = 0x59,
+ QLCNIC_HW_C2C2_CRB_AGT_ADR = 0x5a,
+ QLCNIC_HW_RPMX2_CRB_AGT_ADR = 0x0a,
+ QLCNIC_HW_RPMX4_CRB_AGT_ADR = 0x0c,
+ QLCNIC_HW_RPMX7_CRB_AGT_ADR = 0x0f,
+ QLCNIC_HW_RPMX9_CRB_AGT_ADR = 0x12,
+ QLCNIC_HW_SMB_CRB_AGT_ADR = 0x18
+};
+
+/* Hub 2 */
+enum {
+ QLCNIC_HW_NIU_CRB_AGT_ADR = 0x31,
+ QLCNIC_HW_I2C0_CRB_AGT_ADR = 0x19,
+ QLCNIC_HW_I2C1_CRB_AGT_ADR = 0x29,
+
+ QLCNIC_HW_SN_CRB_AGT_ADR = 0x10,
+ QLCNIC_HW_I2Q_CRB_AGT_ADR = 0x20,
+ QLCNIC_HW_LPC_CRB_AGT_ADR = 0x22,
+ QLCNIC_HW_ROMUSB_CRB_AGT_ADR = 0x21,
+ QLCNIC_HW_QM_CRB_AGT_ADR = 0x66,
+ QLCNIC_HW_SQG0_CRB_AGT_ADR = 0x60,
+ QLCNIC_HW_SQG1_CRB_AGT_ADR = 0x61,
+ QLCNIC_HW_SQG2_CRB_AGT_ADR = 0x62,
+ QLCNIC_HW_SQG3_CRB_AGT_ADR = 0x63,
+ QLCNIC_HW_RPMX1_CRB_AGT_ADR = 0x09,
+ QLCNIC_HW_RPMX5_CRB_AGT_ADR = 0x0d,
+ QLCNIC_HW_RPMX6_CRB_AGT_ADR = 0x0e,
+ QLCNIC_HW_RPMX8_CRB_AGT_ADR = 0x11
+};
+
+/* Hub 3 */
+enum {
+ QLCNIC_HW_PH_CRB_AGT_ADR = 0x1A,
+ QLCNIC_HW_SRE_CRB_AGT_ADR = 0x50,
+ QLCNIC_HW_EG_CRB_AGT_ADR = 0x51,
+ QLCNIC_HW_RPMX0_CRB_AGT_ADR = 0x08
+};
+
+/* Hub 4 */
+enum {
+ QLCNIC_HW_PEGN0_CRB_AGT_ADR = 0x40,
+ QLCNIC_HW_PEGN1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGNI_CRB_AGT_ADR,
+ QLCNIC_HW_PEGND_CRB_AGT_ADR,
+ QLCNIC_HW_PEGNC_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR0_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN4_CRB_AGT_ADR
+};
+
+/* Hub 5 */
+enum {
+ QLCNIC_HW_PEGS0_CRB_AGT_ADR = 0x40,
+ QLCNIC_HW_PEGS1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGS2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGS3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSI_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSD_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSC_CRB_AGT_ADR
+};
+
+/* Hub 6 */
+enum {
+ QLCNIC_HW_CAS0_CRB_AGT_ADR = 0x46,
+ QLCNIC_HW_CAS1_CRB_AGT_ADR = 0x47,
+ QLCNIC_HW_CAS2_CRB_AGT_ADR = 0x48,
+ QLCNIC_HW_CAS3_CRB_AGT_ADR = 0x49,
+ QLCNIC_HW_NCM_CRB_AGT_ADR = 0x16,
+ QLCNIC_HW_TMR_CRB_AGT_ADR = 0x17,
+ QLCNIC_HW_XDMA_CRB_AGT_ADR = 0x05,
+ QLCNIC_HW_OCM0_CRB_AGT_ADR = 0x06,
+ QLCNIC_HW_OCM1_CRB_AGT_ADR = 0x07
+};
+
+/* Floaters - non-existent modules */
+#define QLCNIC_HW_EFC_RPMX0_CRB_AGT_ADR 0x67
+
+/* This field defines PCI/X adr [25:20] of agents on the CRB */
+enum {
+ QLCNIC_HW_PX_MAP_CRB_PH = 0,
+ QLCNIC_HW_PX_MAP_CRB_PS,
+ QLCNIC_HW_PX_MAP_CRB_MN,
+ QLCNIC_HW_PX_MAP_CRB_MS,
+ QLCNIC_HW_PX_MAP_CRB_PGR1,
+ QLCNIC_HW_PX_MAP_CRB_SRE,
+ QLCNIC_HW_PX_MAP_CRB_NIU,
+ QLCNIC_HW_PX_MAP_CRB_QMN,
+ QLCNIC_HW_PX_MAP_CRB_SQN0,
+ QLCNIC_HW_PX_MAP_CRB_SQN1,
+ QLCNIC_HW_PX_MAP_CRB_SQN2,
+ QLCNIC_HW_PX_MAP_CRB_SQN3,
+ QLCNIC_HW_PX_MAP_CRB_QMS,
+ QLCNIC_HW_PX_MAP_CRB_SQS0,
+ QLCNIC_HW_PX_MAP_CRB_SQS1,
+ QLCNIC_HW_PX_MAP_CRB_SQS2,
+ QLCNIC_HW_PX_MAP_CRB_SQS3,
+ QLCNIC_HW_PX_MAP_CRB_PGN0,
+ QLCNIC_HW_PX_MAP_CRB_PGN1,
+ QLCNIC_HW_PX_MAP_CRB_PGN2,
+ QLCNIC_HW_PX_MAP_CRB_PGN3,
+ QLCNIC_HW_PX_MAP_CRB_PGND,
+ QLCNIC_HW_PX_MAP_CRB_PGNI,
+ QLCNIC_HW_PX_MAP_CRB_PGS0,
+ QLCNIC_HW_PX_MAP_CRB_PGS1,
+ QLCNIC_HW_PX_MAP_CRB_PGS2,
+ QLCNIC_HW_PX_MAP_CRB_PGS3,
+ QLCNIC_HW_PX_MAP_CRB_PGSD,
+ QLCNIC_HW_PX_MAP_CRB_PGSI,
+ QLCNIC_HW_PX_MAP_CRB_SN,
+ QLCNIC_HW_PX_MAP_CRB_PGR2,
+ QLCNIC_HW_PX_MAP_CRB_EG,
+ QLCNIC_HW_PX_MAP_CRB_PH2,
+ QLCNIC_HW_PX_MAP_CRB_PS2,
+ QLCNIC_HW_PX_MAP_CRB_CAM,
+ QLCNIC_HW_PX_MAP_CRB_CAS0,
+ QLCNIC_HW_PX_MAP_CRB_CAS1,
+ QLCNIC_HW_PX_MAP_CRB_CAS2,
+ QLCNIC_HW_PX_MAP_CRB_C2C0,
+ QLCNIC_HW_PX_MAP_CRB_C2C1,
+ QLCNIC_HW_PX_MAP_CRB_TIMR,
+ QLCNIC_HW_PX_MAP_CRB_PGR3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX1,
+ QLCNIC_HW_PX_MAP_CRB_RPMX2,
+ QLCNIC_HW_PX_MAP_CRB_RPMX3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX4,
+ QLCNIC_HW_PX_MAP_CRB_RPMX5,
+ QLCNIC_HW_PX_MAP_CRB_RPMX6,
+ QLCNIC_HW_PX_MAP_CRB_RPMX7,
+ QLCNIC_HW_PX_MAP_CRB_XDMA,
+ QLCNIC_HW_PX_MAP_CRB_I2Q,
+ QLCNIC_HW_PX_MAP_CRB_ROMUSB,
+ QLCNIC_HW_PX_MAP_CRB_CAS3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX0,
+ QLCNIC_HW_PX_MAP_CRB_RPMX8,
+ QLCNIC_HW_PX_MAP_CRB_RPMX9,
+ QLCNIC_HW_PX_MAP_CRB_OCM0,
+ QLCNIC_HW_PX_MAP_CRB_OCM1,
+ QLCNIC_HW_PX_MAP_CRB_SMB,
+ QLCNIC_HW_PX_MAP_CRB_I2C0,
+ QLCNIC_HW_PX_MAP_CRB_I2C1,
+ QLCNIC_HW_PX_MAP_CRB_LPC,
+ QLCNIC_HW_PX_MAP_CRB_PGNC,
+ QLCNIC_HW_PX_MAP_CRB_PGR0
+};
+
+/* This field defines CRB adr [31:20] of the agents */
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PH \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_PH_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_MS \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MS_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_PS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_QMS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS0 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS1 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS2 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS3 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C0 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C1 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX4_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX7_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX9_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SMB \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SMB_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_NIU \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0 \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1 \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C1_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SRE \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SRE_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_EG \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_EG_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMN \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_QM_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX5_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX6_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX8_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS2 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS3 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS3_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNI_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGND \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGND_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN4_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNC_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR0 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR1 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR2 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR3 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR3_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSI_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSD \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSD_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSC \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSC_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAM \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_NCM_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_TMR_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_XDMA_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SN \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_SN_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_I2Q_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_ROMUSB_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0 \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM1 \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_LPC \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_LPC_CRB_AGT_ADR)
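Aside (not part of the patch): each define above packs a hub number above bit 7 and an agent number below it, and the packed value is what ends up in CRB address bits [31:20] when an access is routed. A worked example using the NIU values from the enums earlier in this header; the example_* names are illustrative only:

	/* hub 2 (0x03) with NIU agent 0x31 -> (0x03 << 7) | 0x31 = 0x1b1 */
	static const u32 example_niu_agt = QLCNIC_HW_CRB_HUB_AGT_ADR_NIU;
	/* shifted into adr[31:20] for the access: 0x1b100000 */
	static const u32 example_niu_crb_hi = QLCNIC_HW_CRB_HUB_AGT_ADR_NIU << 20;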
+
+#define QLCNIC_SRE_MISC (QLCNIC_CRB_SRE + 0x0002c)
+
+#define QLCNIC_I2Q_CLR_PCI_HI (QLCNIC_CRB_I2Q + 0x00034)
+
+#define ROMUSB_GLB (QLCNIC_CRB_ROMUSB + 0x00000)
+#define ROMUSB_ROM (QLCNIC_CRB_ROMUSB + 0x10000)
+
+#define QLCNIC_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
+#define QLCNIC_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
+#define QLCNIC_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c)
+#define QLCNIC_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
+#define QLCNIC_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044)
+#define QLCNIC_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
+#define QLCNIC_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8)
+
+#define QLCNIC_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n)))
+
+#define QLCNIC_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
+#define QLCNIC_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
+#define QLCNIC_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
+#define QLCNIC_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
+#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
+#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
+
+/* Lock IDs for ROM lock */
+#define ROM_LOCK_DRIVER 0x0d417340
+
+/******************************************************************************
+*
+* Definitions specific to M25P flash
+*
+*******************************************************************************
+*/
+
+/* all are 1MB windows */
+
+#define QLCNIC_PCI_CRB_WINDOWSIZE 0x00100000
+#define QLCNIC_PCI_CRB_WINDOW(A) \
+ (QLCNIC_PCI_CRBSPACE + (A)*QLCNIC_PCI_CRB_WINDOWSIZE)
+
+#define QLCNIC_CRB_NIU QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU)
+#define QLCNIC_CRB_SRE QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE)
+#define QLCNIC_CRB_ROMUSB \
+ QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB)
+#define QLCNIC_CRB_I2Q QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q)
+#define QLCNIC_CRB_I2C0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0)
+#define QLCNIC_CRB_SMB QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB)
+#define QLCNIC_CRB_MAX QLCNIC_PCI_CRB_WINDOW(64)
+
+#define QLCNIC_CRB_PCIX_HOST QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH)
+#define QLCNIC_CRB_PCIX_HOST2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH2)
+#define QLCNIC_CRB_PEG_NET_0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN0)
+#define QLCNIC_CRB_PEG_NET_1 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN1)
+#define QLCNIC_CRB_PEG_NET_2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN2)
+#define QLCNIC_CRB_PEG_NET_3 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN3)
+#define QLCNIC_CRB_PEG_NET_4 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SQS2)
+#define QLCNIC_CRB_PEG_NET_D QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGND)
+#define QLCNIC_CRB_PEG_NET_I QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGNI)
+#define QLCNIC_CRB_DDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_MN)
+#define QLCNIC_CRB_QDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SN)
+
+#define QLCNIC_CRB_PCIX_MD QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PS)
+#define QLCNIC_CRB_PCIE QLCNIC_CRB_PCIX_MD
+
+#define ISR_INT_VECTOR (QLCNIC_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_MASK_SLOW (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_TARGET_STATUS (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_MASK (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_STATUS_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_MASK_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_STATUS_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_MASK_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_STATUS_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_MASK_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_STATUS_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_MASK_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_STATUS_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_MASK_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_STATUS_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_MASK_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define QLCNIC_PCI_MN_2M (0)
+#define QLCNIC_PCI_MS_2M (0x80000)
+#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
+#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
+#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
+#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
+#define QLCNIC_PCI_CAMQM_2M_END (0x04800800UL)
+
+#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
+
+#define QLCNIC_ADDR_DDR_NET (0x0000000000000000ULL)
+#define QLCNIC_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+#define QLCNIC_ADDR_OCM0 (0x0000000200000000ULL)
+#define QLCNIC_ADDR_OCM0_MAX (0x00000002000fffffULL)
+#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL)
+#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL)
+#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL)
+#define QLCNIC_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL)
+
+/*
+ * Register offsets for MN
+ */
+#define QLCNIC_MIU_CONTROL (0x000)
+#define QLCNIC_MIU_MN_CONTROL (QLCNIC_CRB_DDR_NET+QLCNIC_MIU_CONTROL)
+
+/* 200ms delay in each loop */
+#define QLCNIC_NIU_PHY_WAITLEN 200000
+/* 10 seconds before we give up */
+#define QLCNIC_NIU_PHY_WAITMAX 50
+#define QLCNIC_NIU_MAX_GBE_PORTS 4
+#define QLCNIC_NIU_MAX_XG_PORTS 2
+
+#define QLCNIC_NIU_MODE (QLCNIC_CRB_NIU + 0x00000)
+#define QLCNIC_NIU_GB_PAUSE_CTL (QLCNIC_CRB_NIU + 0x0030c)
+#define QLCNIC_NIU_XG_PAUSE_CTL (QLCNIC_CRB_NIU + 0x00098)
+
+#define QLCNIC_NIU_GB_MAC_CONFIG_0(I) \
+ (QLCNIC_CRB_NIU + 0x30000 + (I)*0x10000)
+#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \
+ (QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000)
+
+
+#define TEST_AGT_CTRL (0x00)
+
+#define TA_CTL_START 1
+#define TA_CTL_ENABLE 2
+#define TA_CTL_WRITE 4
+#define TA_CTL_BUSY 8
+
+/*
+ * Register offsets for MN
+ */
+#define MIU_TEST_AGT_BASE (0x90)
+
+#define MIU_TEST_AGT_ADDR_LO (0x04)
+#define MIU_TEST_AGT_ADDR_HI (0x08)
+#define MIU_TEST_AGT_WRDATA_LO (0x10)
+#define MIU_TEST_AGT_WRDATA_HI (0x14)
+#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
+#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
+#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
+#define MIU_TEST_AGT_RDDATA_LO (0x18)
+#define MIU_TEST_AGT_RDDATA_HI (0x1c)
+#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
+#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
+#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
+
+#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
+#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
+
+/*
+ * Register offsets for MS
+ */
+#define SIU_TEST_AGT_BASE (0x60)
+
+#define SIU_TEST_AGT_ADDR_LO (0x04)
+#define SIU_TEST_AGT_ADDR_HI (0x18)
+#define SIU_TEST_AGT_WRDATA_LO (0x08)
+#define SIU_TEST_AGT_WRDATA_HI (0x0c)
+#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i)))
+#define SIU_TEST_AGT_RDDATA_LO (0x10)
+#define SIU_TEST_AGT_RDDATA_HI (0x14)
+#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i)))
+
+#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
+#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
+
+/* XG Link status */
+#define XG_LINK_UP 0x10
+#define XG_LINK_DOWN 0x20
+
+#define XG_LINK_UP_P3 0x01
+#define XG_LINK_DOWN_P3 0x02
+#define XG_LINK_STATE_P3_MASK 0xf
+#define XG_LINK_STATE_P3(pcifn, val) \
+ (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK)
+
+#define P3_LINK_SPEED_MHZ 100
+#define P3_LINK_SPEED_MASK 0xff
+#define P3_LINK_SPEED_REG(pcifn) \
+ (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
+#define P3_LINK_SPEED_VAL(pcifn, reg) \
+ (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK)
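Aside (not part of the patch): the two macros above split CRB_PF_LINK_SPEED_1/2 into one byte per PCI function, four functions per register, with the byte apparently expressing speed in units of 100 Mbps; this mirrors how qlcnic_get_settings() earlier in this patch consumes it. A minimal sketch, assuming qlcnic.h is included; example_link_speed_mbps() is an illustrative name:

	static u32 example_link_speed_mbps(struct qlcnic_adapter *adapter, u16 pcifn)
	{
		u32 val = QLCRD32(adapter, P3_LINK_SPEED_REG(pcifn));

		/* byte for this function, scaled by 100: e.g. 100 * 100 = 10000 Mbps */
		return P3_LINK_SPEED_MHZ * P3_LINK_SPEED_VAL(pcifn, val);
	}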
+
+#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000)
+#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg))
+#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150))
+#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154))
+#define QLCNIC_FW_VERSION_SUB (QLCNIC_CAM_RAM(0x158))
+#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100))
+#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120))
+#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124))
+
+#define NIC_CRB_BASE (QLCNIC_CAM_RAM(0x200))
+#define NIC_CRB_BASE_2 (QLCNIC_CAM_RAM(0x700))
+#define QLCNIC_REG(X) (NIC_CRB_BASE+(X))
+#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X))
+
+#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18))
+#define QLCNIC_ARG1_CRB_OFFSET (QLCNIC_REG(0x1c))
+#define QLCNIC_ARG2_CRB_OFFSET (QLCNIC_REG(0x20))
+#define QLCNIC_ARG3_CRB_OFFSET (QLCNIC_REG(0x24))
+#define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28))
+
+#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50))
+#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c))
+
+#define CRB_XG_STATE_P3 (QLCNIC_REG(0x98))
+#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
+#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
+
+#define CRB_MPORT_MODE (QLCNIC_REG(0xc4))
+#define CRB_DMA_SHIFT (QLCNIC_REG(0xcc))
+
+#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4))
+
+#define CRB_V2P_0 (QLCNIC_REG(0x290))
+#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
+#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
+
+#define CRB_SW_INT_MASK_0 (QLCNIC_REG(0x1d8))
+#define CRB_SW_INT_MASK_1 (QLCNIC_REG(0x1e0))
+#define CRB_SW_INT_MASK_2 (QLCNIC_REG(0x1e4))
+#define CRB_SW_INT_MASK_3 (QLCNIC_REG(0x1e8))
+
+#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
+#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
+
+/*
+ * capabilities register, can be used to selectively enable/disable features
+ * for backward compatibility
+ */
+#define CRB_NIC_CAPABILITIES_HOST QLCNIC_REG(0x1a8)
+#define CRB_NIC_CAPABILITIES_FW QLCNIC_REG(0x1dc)
+#define CRB_NIC_MSI_MODE_HOST QLCNIC_REG(0x270)
+#define CRB_NIC_MSI_MODE_FW QLCNIC_REG(0x274)
+
+#define INTR_SCHEME_PERPORT 0x1
+#define MSI_MODE_MULTIFUNC 0x1
+
+/* used for ethtool tests */
+#define CRB_SCRATCHPAD_TEST QLCNIC_REG(0x280)
+
+/*
+ * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex,
+ * which can be read by the Phantom host to get producer/consumer indexes from
+ * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following
+ * registers will be used for the addresses of the ring's shared memory
+ * on the Phantom.
+ */
+
+#define qlcnic_get_temp_val(x) ((x) >> 16)
+#define qlcnic_get_temp_state(x) ((x) & 0xffff)
+#define qlcnic_encode_temp(val, state) (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+ QLCNIC_TEMP_NORMAL = 0x1, /* Normal operating range */
+ QLCNIC_TEMP_WARN, /* Sound alert, temperature getting high */
+ QLCNIC_TEMP_PANIC /* Fatal error, hardware has shut down. */
+};
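Aside (not part of the patch): the word read from CRB_TEMP_STATE carries the state code in the low 16 bits and the temperature value in the high 16 bits, per the helpers above. A hedged sketch of how a health check might consume it, assuming qlcnic.h is included; example_check_temp() is an illustrative name, not a driver function:

	static int example_check_temp(struct qlcnic_adapter *adapter)
	{
		u32 temp = QLCRD32(adapter, CRB_TEMP_STATE);
		u32 temp_state = qlcnic_get_temp_state(temp);
		u32 temp_val = qlcnic_get_temp_val(temp);

		if (temp_state == QLCNIC_TEMP_PANIC)
			dev_err(&adapter->pdev->dev,
				"Device temperature %u C, maximum exceeded\n", temp_val);

		return temp_state;
	}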
+
+/* Lock IDs for PHY lock */
+#define PHY_LOCK_DRIVER 0x44524956
+
+/* Used for PS PCI Memory access */
+#define PCIX_PS_OP_ADDR_LO (0x10000)
+/* via CRB (PS side only) */
+#define PCIX_PS_OP_ADDR_HI (0x10004)
+
+#define PCIX_INT_VECTOR (0x10100)
+#define PCIX_INT_MASK (0x10104)
+
+#define PCIX_OCM_WINDOW (0x10800)
+#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func))
+
+#define PCIX_TARGET_STATUS (0x10118)
+#define PCIX_TARGET_STATUS_F1 (0x10160)
+#define PCIX_TARGET_STATUS_F2 (0x10164)
+#define PCIX_TARGET_STATUS_F3 (0x10168)
+#define PCIX_TARGET_STATUS_F4 (0x10360)
+#define PCIX_TARGET_STATUS_F5 (0x10364)
+#define PCIX_TARGET_STATUS_F6 (0x10368)
+#define PCIX_TARGET_STATUS_F7 (0x1036c)
+
+#define PCIX_TARGET_MASK (0x10128)
+#define PCIX_TARGET_MASK_F1 (0x10170)
+#define PCIX_TARGET_MASK_F2 (0x10174)
+#define PCIX_TARGET_MASK_F3 (0x10178)
+#define PCIX_TARGET_MASK_F4 (0x10370)
+#define PCIX_TARGET_MASK_F5 (0x10374)
+#define PCIX_TARGET_MASK_F6 (0x10378)
+#define PCIX_TARGET_MASK_F7 (0x1037c)
+
+#define PCIX_MSI_F(i) (0x13000+((i)*4))
+
+#define QLCNIC_PCIX_PH_REG(reg) (QLCNIC_CRB_PCIE + (reg))
+#define QLCNIC_PCIX_PS_REG(reg) (QLCNIC_CRB_PCIX_MD + (reg))
+#define QLCNIC_PCIE_REG(reg) (QLCNIC_CRB_PCIE + (reg))
+
+#define PCIE_SEM0_LOCK (0x1c000)
+#define PCIE_SEM0_UNLOCK (0x1c004)
+#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N))
+#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N))
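Aside (not part of the patch): the PCIE_SEM_* registers implement simple hardware semaphores: reading the LOCK register returns 1 once the semaphore is held, and reading the UNLOCK register releases it; ROM_LOCK_DRIVER above is the ID the driver deposits to identify the holder. A rough sketch under those assumptions; the example_* names are illustrative, semaphore 2 is assumed to guard flash access, and a real caller would poll with a retry limit:

	static int example_rom_lock(struct qlcnic_adapter *adapter)
	{
		if (QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(2))) != 1)
			return -EBUSY;	/* real code retries for a while */

		QLCWR32(adapter, QLCNIC_ROM_LOCK_ID, ROM_LOCK_DRIVER);
		return 0;
	}

	static void example_rom_unlock(struct qlcnic_adapter *adapter)
	{
		QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(2)));
	}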
+
+#define PCIE_SETUP_FUNCTION (0x12040)
+#define PCIE_SETUP_FUNCTION2 (0x12048)
+#define PCIE_MISCCFG_RC (0x1206c)
+#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
+#define PCIE_CHICKEN3 (0x120c8)
+
+#define ISR_INT_STATE_REG (QLCNIC_PCIX_PS_REG(PCIE_MISCCFG_RC))
+#define PCIE_MAX_MASTER_SPLIT (0x14048)
+
+#define QLCNIC_PORT_MODE_NONE 0
+#define QLCNIC_PORT_MODE_XG 1
+#define QLCNIC_PORT_MODE_GB 2
+#define QLCNIC_PORT_MODE_802_3_AP 3
+#define QLCNIC_PORT_MODE_AUTO_NEG 4
+#define QLCNIC_PORT_MODE_AUTO_NEG_1G 5
+#define QLCNIC_PORT_MODE_AUTO_NEG_XG 6
+#define QLCNIC_PORT_MODE_ADDR (QLCNIC_CAM_RAM(0x24))
+#define QLCNIC_WOL_PORT_MODE (QLCNIC_CAM_RAM(0x198))
+
+#define QLCNIC_WOL_CONFIG_NV (QLCNIC_CAM_RAM(0x184))
+#define QLCNIC_WOL_CONFIG (QLCNIC_CAM_RAM(0x188))
+
+#define QLCNIC_PEG_TUNE_MN_PRESENT 0x1
+#define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c))
+
+#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14))
+#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0))
+#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8))
+#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac))
+#define QLCNIC_CRB_DEV_REF_COUNT (QLCNIC_CAM_RAM(0x138))
+#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
+
+#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
+#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
+#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
+#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x14c))
+
+ /* Device State */
+#define QLCNIC_DEV_COLD 1
+#define QLCNIC_DEV_INITALIZING 2
+#define QLCNIC_DEV_READY 3
+#define QLCNIC_DEV_NEED_RESET 4
+#define QLCNIC_DEV_NEED_QUISCENT 5
+#define QLCNIC_DEV_FAILED 6
+
+#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
+#define QLCNIC_RCODE_DRIVER_CAN_RELOAD 0x40000000
+#define QLCNIC_RCODE_FATAL_ERROR 0x80000000
+#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
+#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
+
+#define FW_POLL_DELAY (2 * HZ)
+#define FW_FAIL_THRESH 3
+#define FW_POLL_THRESH 10
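Aside (not part of the patch): a halted peg reports its number in the low byte and an error code in bits [27:8] of the halt status word, with bit 31 flagging a fatal error, per the macros above. A sketch of decoding it, assuming the word is read from QLCNIC_PEG_HALT_STATUS1; example_report_fw_halt() is an illustrative name:

	static void example_report_fw_halt(struct qlcnic_adapter *adapter)
	{
		u32 halt = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);

		if (halt & QLCNIC_RCODE_FATAL_ERROR)
			dev_err(&adapter->pdev->dev,
				"firmware halted: peg %u, error code 0x%x\n",
				QLCNIC_FWERROR_PEGNUM(halt),
				QLCNIC_FWERROR_CODE(halt));
	}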
+
+#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define PCIX_INT_VECTOR_BIT_F0 0x0080
+#define PCIX_INT_VECTOR_BIT_F1 0x0100
+#define PCIX_INT_VECTOR_BIT_F2 0x0200
+#define PCIX_INT_VECTOR_BIT_F3 0x0400
+#define PCIX_INT_VECTOR_BIT_F4 0x0800
+#define PCIX_INT_VECTOR_BIT_F5 0x1000
+#define PCIX_INT_VECTOR_BIT_F6 0x2000
+#define PCIX_INT_VECTOR_BIT_F7 0x4000
+
+struct qlcnic_legacy_intr_set {
+ u32 int_vec_bit;
+ u32 tgt_status_reg;
+ u32 tgt_mask_reg;
+ u32 pci_int_reg;
+};
+
+#define QLCNIC_LEGACY_INTR_CONFIG \
+{ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
+}
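Aside (not part of the patch): QLCNIC_LEGACY_INTR_CONFIG is an initializer for an eight-entry table, indexed by PCI function, of the status/mask registers and vector bit used in legacy (INTx) interrupt mode. A minimal sketch of how such a table can be instantiated and looked up; the example_* names are illustrative, and the caller must keep pci_func below 8:

	static const struct qlcnic_legacy_intr_set legacy_intr[] =
		QLCNIC_LEGACY_INTR_CONFIG;

	static const struct qlcnic_legacy_intr_set *
	example_legacy_intr_for(u32 pci_func)
	{
		return &legacy_intr[pci_func];
	}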
+
+/* NIU REGS */
+
+#define _qlcnic_crb_get_bit(var, bit) ((var >> bit) & 0x1)
+
+/*
+ * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
+ *
+ * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
+ * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
+ * Bit 2 : enable_rx => 1:enable frame recv, 0:disable
+ * Bit 3 : rx_synced => R/O: recv enable synched to recv stream
+ * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
+ * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
+ * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
+ * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
+ * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
+ * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
+ * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
+ * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
+ */
+#define qlcnic_gb_rx_flowctl(config_word) \
+ ((config_word) |= 1 << 5)
+#define qlcnic_gb_get_rx_flowctl(config_word) \
+ _qlcnic_crb_get_bit((config_word), 5)
+#define qlcnic_gb_unset_rx_flowctl(config_word) \
+ ((config_word) &= ~(1 << 5))
+
+/*
+ * NIU GB Pause Ctl Register
+ */
+
+#define qlcnic_gb_set_gb0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define qlcnic_gb_set_gb1_mask(config_word) \
+ ((config_word) |= 1 << 2)
+#define qlcnic_gb_set_gb2_mask(config_word) \
+ ((config_word) |= 1 << 4)
+#define qlcnic_gb_set_gb3_mask(config_word) \
+ ((config_word) |= 1 << 6)
+
+#define qlcnic_gb_get_gb0_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 0)
+#define qlcnic_gb_get_gb1_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 2)
+#define qlcnic_gb_get_gb2_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 4)
+#define qlcnic_gb_get_gb3_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 6)
+
+#define qlcnic_gb_unset_gb0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define qlcnic_gb_unset_gb1_mask(config_word) \
+ ((config_word) &= ~(1 << 2))
+#define qlcnic_gb_unset_gb2_mask(config_word) \
+ ((config_word) &= ~(1 << 4))
+#define qlcnic_gb_unset_gb3_mask(config_word) \
+ ((config_word) &= ~(1 << 6))
+
+/*
+ * NIU XG Pause Ctl Register
+ *
+ * Bit 0 : xg0_mask => 1:disable tx pause frames
+ * Bit 1 : xg0_request => 1:request single pause frame
+ * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
+ * Bit 3 : xg1_mask => 1:disable tx pause frames
+ * Bit 4 : xg1_request => 1:request single pause frame
+ * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
+ */
+
+#define qlcnic_xg_set_xg0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define qlcnic_xg_set_xg1_mask(config_word) \
+ ((config_word) |= 1 << 3)
+
+#define qlcnic_xg_get_xg0_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 0)
+#define qlcnic_xg_get_xg1_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 3)
+
+#define qlcnic_xg_unset_xg0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define qlcnic_xg_unset_xg1_mask(config_word) \
+ ((config_word) &= ~(1 << 3))
+
+/*
+ * PHY-Specific MII control/status registers.
+ */
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG 4
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17
+
+/*
+ * PHY-Specific Status Register (reg 17).
+ *
+ * Bit 0 : jabber => 1:jabber detected, 0:not
+ * Bit 1 : polarity => 1:polarity reversed, 0:normal
+ * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled
+ * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled
+ * Bit 4 : energydetect => 1:sleep, 0:active
+ * Bit 5 : downshift => 1:downshift, 0:no downshift
+ * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
+ * Bits 7-9 : cablelen => not valid in 10Mb/s mode
+ * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
+ * Bit 10 : link => 1:link up, 0:link down
+ * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet
+ * Bit 12 : pagercvd => 1:page received, 0:page not received
+ * Bit 13 : duplex => 1:full duplex, 0:half duplex
+ * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
+ */
+
+#define qlcnic_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)
+
+#define qlcnic_set_phy_speed(config_word, val) \
+ ((config_word) |= ((val & 0x03) << 14))
+#define qlcnic_set_phy_duplex(config_word) \
+ ((config_word) |= 1 << 13)
+#define qlcnic_clear_phy_duplex(config_word) \
+ ((config_word) &= ~(1 << 13))
+
+#define qlcnic_get_phy_link(config_word) \
+ _qlcnic_crb_get_bit(config_word, 10)
+#define qlcnic_get_phy_duplex(config_word) \
+ _qlcnic_crb_get_bit(config_word, 13)
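Aside (not part of the patch): the helpers above pull the fields described in the register-17 comment out of a raw status word. A small sketch; example_decode_phy_status() is an illustrative name:

	static void example_decode_phy_status(u32 status, int *link_up,
					      int *full_duplex, int *speed_code)
	{
		*link_up = qlcnic_get_phy_link(status);		/* bit 10 */
		*full_duplex = qlcnic_get_phy_duplex(status);	/* bit 13 */
		*speed_code = qlcnic_get_phy_speed(status);	/* 0:10M 1:100M 2:1G */
	}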
+
+#define QLCNIC_NIU_NON_PROMISC_MODE 0
+#define QLCNIC_NIU_PROMISC_MODE 1
+#define QLCNIC_NIU_ALLMULTI_MODE 2
+
+struct crb_128M_2M_sub_block_map {
+ unsigned valid;
+ unsigned start_128M;
+ unsigned end_128M;
+ unsigned start_2M;
+};
+
+struct crb_128M_2M_block_map{
+ struct crb_128M_2M_sub_block_map sub_block[16];
+};
+#endif /* __QLCNIC_HDR_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
new file mode 100644
index 000000000000..dc6cd69d6d93
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -0,0 +1,1275 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include "qlcnic.h"
+
+#include <net/ip.h>
+
+#define MASK(n) ((1ULL<<(n))-1)
+#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
+
+#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
+
+#define CRB_BLK(off) ((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
+#define CRB_WINDOW_2M (0x130060)
+#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
+#define CRB_INDIRECT_2M (0x1e0000UL)
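
As a quick orientation for these helpers, a tiny self-check (illustrative only, not part of the patch) showing what the bit arithmetic yields for a sample offset; the example_* function is invented for the sketch:

/* Illustrative sketch only: what the CRB helper macros compute. */
static void example_crb_macro_usage(void)
{
	unsigned long off = 0x0608000;	/* an offset inside the NIU block */

	WARN_ON(CRB_BLK(off) != 0x06);		/* bits 20-25 select the block     */
	WARN_ON(CRB_SUBBLK(off) != 0x0);	/* bits 16-19 select the sub-block */
	WARN_ON(GET_MEM_OFFS_2M(0x12345678UL) != 0x5678UL);	/* low 18 bits */
}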
+
+
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+ return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel(((u32) (val)), (addr));
+ writel(((u32) (val >> 32)), (addr + 4));
+}
+#endif
+
+#define ADDR_IN_RANGE(addr, low, high) \
+ (((addr) < (high)) && ((addr) >= (low)))
+
+#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
+ ((adapter)->ahw.pci_base0 + (off))
+
+static void __iomem *pci_base_offset(struct qlcnic_adapter *adapter,
+ unsigned long off)
+{
+ if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
+ return PCI_OFFSET_FIRST_RANGE(adapter, off);
+
+ return NULL;
+}
+
+static const struct crb_128M_2M_block_map
+crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
+ {{{0, 0, 0, 0} } }, /* 0: PCI */
+ {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
+ {1, 0x0110000, 0x0120000, 0x130000},
+ {1, 0x0120000, 0x0122000, 0x124000},
+ {1, 0x0130000, 0x0132000, 0x126000},
+ {1, 0x0140000, 0x0142000, 0x128000},
+ {1, 0x0150000, 0x0152000, 0x12a000},
+ {1, 0x0160000, 0x0170000, 0x110000},
+ {1, 0x0170000, 0x0172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x01e0000, 0x01e0800, 0x122000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
+ {{{0, 0, 0, 0} } }, /* 3: */
+ {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
+ {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
+ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
+ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
+ {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x08f0000, 0x08f2000, 0x172000} } },
+ {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x09f0000, 0x09f2000, 0x176000} } },
+ {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0af0000, 0x0af2000, 0x17a000} } },
+ {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+ {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
+ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
+ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
+ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
+ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
+ {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
+ {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
+ {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
+ {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
+ {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
+ {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
+ {{{0, 0, 0, 0} } }, /* 23: */
+ {{{0, 0, 0, 0} } }, /* 24: */
+ {{{0, 0, 0, 0} } }, /* 25: */
+ {{{0, 0, 0, 0} } }, /* 26: */
+ {{{0, 0, 0, 0} } }, /* 27: */
+ {{{0, 0, 0, 0} } }, /* 28: */
+ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
+ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
+ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
+ {{{0} } }, /* 32: PCI */
+ {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
+ {1, 0x2110000, 0x2120000, 0x130000},
+ {1, 0x2120000, 0x2122000, 0x124000},
+ {1, 0x2130000, 0x2132000, 0x126000},
+ {1, 0x2140000, 0x2142000, 0x128000},
+ {1, 0x2150000, 0x2152000, 0x12a000},
+ {1, 0x2160000, 0x2170000, 0x110000},
+ {1, 0x2170000, 0x2172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
+ {{{0} } }, /* 35: */
+ {{{0} } }, /* 36: */
+ {{{0} } }, /* 37: */
+ {{{0} } }, /* 38: */
+ {{{0} } }, /* 39: */
+ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
+ {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
+ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
+ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
+ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
+ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
+ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
+ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
+ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
+ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
+ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
+ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
+ {{{0} } }, /* 52: */
+ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
+ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
+ {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
+ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
+ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
+ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
+ {{{0} } }, /* 59: I2C0 */
+ {{{0} } }, /* 60: I2C1 */
+ {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
+ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
+ {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
+};
+
+/*
+ * top 12 bits of crb internal address (hub, agent)
+ */
+static const unsigned crb_hub_agt[64] = {
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
+ 0,
+};
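
For readers new to the windowing scheme, the following comment (illustrative commentary only, not part of the patch) spells out how this table combines with the CRB_HI() macro defined above:

/*
 * Illustration: for an offset 'off' (with QLCNIC_PCI_CRBSPACE already
 * subtracted) that lands in the NIU block, CRB_BLK(off) == 6, so
 * crb_hub_agt[6] == QLCNIC_HW_CRB_HUB_AGT_ADR_NIU and
 *
 *   CRB_HI(off) == (QLCNIC_HW_CRB_HUB_AGT_ADR_NIU << 20) | (off & 0xf0000)
 *
 * This is the value written to CRB_WINDOW_2M before an indirect access
 * through CRB_INDIRECT_2M when the offset has no direct 2M mapping.
 */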
+
+/* PCI Windowing for DDR regions. */
+
+#define QLCNIC_PCIE_SEM_TIMEOUT 10000
+
+int
+qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
+{
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
+ if (done == 1)
+ break;
+ if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT)
+ return -EIO;
+ msleep(1);
+ }
+
+ if (id_reg)
+ QLCWR32(adapter, id_reg, adapter->portnum);
+
+ return 0;
+}
+
+void
+qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
+{
+ QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
+}
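
A minimal sketch of the intended lock/unlock call pattern (hypothetical caller; the semaphore index is arbitrary here, and passing 0 as id_reg skips the owner-id write):

/* Illustrative sketch only: take a hardware semaphore, do the protected
 * access, and always release it on the way out.
 */
static int example_locked_access(struct qlcnic_adapter *adapter)
{
	int err;

	err = qlcnic_pcie_sem_lock(adapter, 2, 0);
	if (err)
		return err;	/* -EIO after QLCNIC_PCIE_SEM_TIMEOUT polls */

	/* ... protected flash/CRB access would go here ... */

	qlcnic_pcie_sem_unlock(adapter, 2);
	return 0;
}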
+
+static int
+qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
+ struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
+{
+ u32 i, producer, consumer;
+ struct qlcnic_cmd_buffer *pbuf;
+ struct cmd_desc_type0 *cmd_desc;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ i = 0;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EIO;
+
+ tx_ring = adapter->tx_ring;
+ __netif_tx_lock_bh(tx_ring->txq);
+
+ producer = tx_ring->producer;
+ consumer = tx_ring->sw_consumer;
+
+ if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
+ netif_tx_stop_queue(tx_ring->txq);
+ __netif_tx_unlock_bh(tx_ring->txq);
+ return -EBUSY;
+ }
+
+ do {
+ cmd_desc = &cmd_desc_arr[i];
+
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+ pbuf->skb = NULL;
+ pbuf->frag_count = 0;
+
+ memcpy(&tx_ring->desc_head[producer],
+ &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ i++;
+
+ } while (i != nr_desc);
+
+ tx_ring->producer = producer;
+
+ qlcnic_update_cmd_producer(adapter, tx_ring);
+
+ __netif_tx_unlock_bh(tx_ring->txq);
+
+ return 0;
+}
+
+static int
+qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
+ unsigned op)
+{
+ struct qlcnic_nic_req req;
+ struct qlcnic_mac_req *mac_req;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
+
+ word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ mac_req = (struct qlcnic_mac_req *)&req.words[0];
+ mac_req->op = op;
+ memcpy(mac_req->mac_addr, addr, 6);
+
+ return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+}
+
+static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter,
+ u8 *addr, struct list_head *del_list)
+{
+ struct list_head *head;
+ struct qlcnic_mac_list_s *cur;
+
+	/* check whether the address is already in the list */
+ list_for_each(head, del_list) {
+ cur = list_entry(head, struct qlcnic_mac_list_s, list);
+
+ if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+ list_move_tail(head, &adapter->mac_list);
+ return 0;
+ }
+ }
+
+ cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
+ if (cur == NULL) {
+ dev_err(&adapter->netdev->dev,
+ "failed to add mac address filter\n");
+ return -ENOMEM;
+ }
+ memcpy(cur->mac_addr, addr, ETH_ALEN);
+ list_add_tail(&cur->list, &adapter->mac_list);
+
+ return qlcnic_sre_macaddr_change(adapter,
+ cur->mac_addr, QLCNIC_MAC_ADD);
+}
+
+void qlcnic_set_multi(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct dev_mc_list *mc_ptr;
+ u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ u32 mode = VPORT_MISS_MODE_DROP;
+ LIST_HEAD(del_list);
+ struct list_head *head;
+ struct qlcnic_mac_list_s *cur;
+
+ list_splice_tail_init(&adapter->mac_list, &del_list);
+
+ qlcnic_nic_add_mac(adapter, adapter->mac_addr, &del_list);
+ qlcnic_nic_add_mac(adapter, bcast_addr, &del_list);
+
+ if (netdev->flags & IFF_PROMISC) {
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ goto send_fw_cmd;
+ }
+
+ if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev->mc_count > adapter->max_mc_count)) {
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ goto send_fw_cmd;
+ }
+
+ if (netdev->mc_count > 0) {
+ for (mc_ptr = netdev->mc_list; mc_ptr;
+ mc_ptr = mc_ptr->next) {
+ qlcnic_nic_add_mac(adapter, mc_ptr->dmi_addr,
+ &del_list);
+ }
+ }
+
+send_fw_cmd:
+ qlcnic_nic_set_promisc(adapter, mode);
+ head = &del_list;
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+
+ qlcnic_sre_macaddr_change(adapter,
+ cur->mac_addr, QLCNIC_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(mode);
+
+ return qlcnic_send_cmd_descs(adapter,
+ (struct cmd_desc_type0 *)&req, 1);
+}
+
+void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mac_list_s *cur;
+ struct list_head *head = &adapter->mac_list;
+
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+ qlcnic_sre_macaddr_change(adapter,
+ cur->mac_addr, QLCNIC_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+#define QLCNIC_CONFIG_INTR_COALESCE 3
+
+/*
+ * Send the interrupt coalescing parameter set by ethtool to the card.
+ */
+int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_req req;
+ u64 word[6];
+ int rv, i;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word[0] = QLCNIC_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word[0]);
+
+ memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
+ for (i = 0; i < 6; i++)
+ req.words[i] = cpu_to_le64(word[i]);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send interrupt coalescing parameters\n");
+
+ return rv;
+}
+
+int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ if ((adapter->flags & QLCNIC_LRO_ENABLED) == enable)
+ return 0;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(enable);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send configure hw lro request\n");
+
+ adapter->flags ^= QLCNIC_LRO_ENABLED;
+
+ return rv;
+}
+
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
+ return 0;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(enable);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send configure bridge mode request\n");
+
+ adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
+
+ return rv;
+}
+
+
+#define RSS_HASHTYPE_IP_TCP 0x3
+
+int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int i, rv;
+
+ const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL };
+
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ /*
+ * RSS request:
+ * bits 3-0: hash_method
+ * 5-4: hash_type_ipv4
+ * 7-6: hash_type_ipv6
+ * 8: enable
+ * 9: use indirection table
+ * 47-10: reserved
+ * 63-48: indirection table mask
+ */
+ word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ ((u64)(enable & 0x1) << 8) |
+ ((0x7ULL) << 48);
+ req.words[0] = cpu_to_le64(word);
+ for (i = 0; i < 5; i++)
+ req.words[i+1] = cpu_to_le64(key[i]);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev, "could not configure RSS\n");
+
+ return rv;
+}
+
+int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(cmd);
+ req.words[1] = cpu_to_le64(ip);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+				"could not notify %s IP 0x%x request\n",
+ (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
+
+ return rv;
+}
+
+int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+ req.words[0] = cpu_to_le64(enable | (enable << 8));
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "could not configure link notification\n");
+
+ return rv;
+}
+
+int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
+ ((u64)adapter->portnum << 16) |
+			((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56);
+
+ req.req_hdr = cpu_to_le64(word);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "could not cleanup lro flows\n");
+
+ return rv;
+}
+
+/*
+ * qlcnic_change_mtu - change the Maximum Transmission Unit
+ * Returns 0 on success, negative on failure.
+ */
+
+int qlcnic_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int rc = 0;
+
+ if (mtu > P3_MAX_MTU) {
+ dev_err(&adapter->netdev->dev, "mtu > %d bytes unsupported\n",
+ P3_MAX_MTU);
+ return -EINVAL;
+ }
+
+ rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
+
+ if (!rc)
+ netdev->mtu = mtu;
+
+ return rc;
+}
+
+int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac)
+{
+ u32 crbaddr, mac_hi, mac_lo;
+ int pci_func = adapter->ahw.pci_func;
+
+ crbaddr = CRB_MAC_BLOCK_START +
+ (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
+
+ mac_lo = QLCRD32(adapter, crbaddr);
+ mac_hi = QLCRD32(adapter, crbaddr+4);
+
+ if (pci_func & 1)
+ *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
+ else
+ *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));
+
+ return 0;
+}
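
To make the CRB address arithmetic above concrete, a worked example (illustrative commentary only, not part of the patch):

/*
 * Example: for pci_func == 3,
 *   crbaddr = CRB_MAC_BLOCK_START + 4 * ((3 / 2) * 3) + 4 * (3 & 1)
 *           = CRB_MAC_BLOCK_START + 12 + 4
 * i.e. each pair of functions shares a 12-byte (three-dword) block holding
 * two 6-byte MACs back to back, which is why the odd function reads its
 * low word 4 bytes in and shifts mac_lo right by 16 bits.
 */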
+
+/*
+ * Map an offset in the legacy 128M CRB space to an address in the 2M BAR.
+ *
+ * In: 'off' is the offset from the CRB base in the 128M pci map.
+ * Returns:
+ *	< 0 if 'off' is not a valid CRB offset;
+ *	  1 if window access is needed; '*addr' is set to the indirect
+ *	    window address in the 2M map;
+ *	  0 if no window access is needed; '*addr' is set to the direct
+ *	    2M address.
+ */
+static int
+qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
+ ulong off, void __iomem **addr)
+{
+ const struct crb_128M_2M_sub_block_map *m;
+
+ if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
+ return -EINVAL;
+
+ off -= QLCNIC_PCI_CRBSPACE;
+
+ /*
+ * Try direct map
+ */
+ m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
+
+ if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
+ *addr = adapter->ahw.pci_base0 + m->start_2M +
+ (off - m->start_128M);
+ return 0;
+ }
+
+ /*
+ * Not in direct map, use crb window
+ */
+ *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
+ return 1;
+}
+
+/*
+ * Changes the CRB window to the one covering 'off'.
+ *
+ * In: 'off' is the offset from the CRB base in the 128M pci map.
+ * The caller must hold the CRB window lock; the new window value is
+ * cached in adapter->ahw.crb_win.
+ */
+static void
+qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
+{
+ u32 window;
+ void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;
+
+ off -= QLCNIC_PCI_CRBSPACE;
+
+ window = CRB_HI(off);
+
+ if (adapter->ahw.crb_win == window)
+ return;
+
+ writel(window, addr);
+ if (readl(addr) != window) {
+ if (printk_ratelimit())
+ dev_warn(&adapter->pdev->dev,
+ "failed to set CRB window to %d off 0x%lx\n",
+ window, off);
+ }
+ adapter->ahw.crb_win = window;
+}
+
+int
+qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
+{
+ unsigned long flags;
+ int rv;
+ void __iomem *addr = NULL;
+
+ rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
+
+ if (rv == 0) {
+ writel(data, addr);
+ return 0;
+ }
+
+ if (rv > 0) {
+ /* indirect access */
+ write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ crb_win_lock(adapter);
+ qlcnic_pci_set_crbwindow_2M(adapter, off);
+ writel(data, addr);
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ return 0;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid offset: 0x%016lx\n", __func__, off);
+ dump_stack();
+ return -EIO;
+}
+
+u32
+qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
+{
+ unsigned long flags;
+ int rv;
+ u32 data;
+ void __iomem *addr = NULL;
+
+ rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
+
+ if (rv == 0)
+ return readl(addr);
+
+ if (rv > 0) {
+ /* indirect access */
+ write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ crb_win_lock(adapter);
+ qlcnic_pci_set_crbwindow_2M(adapter, off);
+ data = readl(addr);
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ return data;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid offset: 0x%016lx\n", __func__, off);
+ dump_stack();
+ return -1;
+}
+
+
+void __iomem *
+qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
+{
+ void __iomem *addr = NULL;
+
+ WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));
+
+ return addr;
+}
+
+
+static int
+qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
+ u64 addr, u32 *start)
+{
+ u32 window;
+ struct pci_dev *pdev = adapter->pdev;
+
+ if ((addr & 0x00ff800) == 0xff800) {
+ if (printk_ratelimit())
+ dev_warn(&pdev->dev, "QM access not handled\n");
+ return -EIO;
+ }
+
+ window = OCM_WIN_P3P(addr);
+
+ writel(window, adapter->ahw.ocm_win_crb);
+ /* read back to flush */
+ readl(adapter->ahw.ocm_win_crb);
+
+ adapter->ahw.ocm_win = window;
+ *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
+ return 0;
+}
+
+static int
+qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
+ u64 *data, int op)
+{
+ void __iomem *addr, *mem_ptr = NULL;
+ resource_size_t mem_base;
+ int ret;
+ u32 start;
+
+ mutex_lock(&adapter->ahw.mem_lock);
+
+ ret = qlcnic_pci_set_window_2M(adapter, off, &start);
+ if (ret != 0)
+ goto unlock;
+
+ addr = pci_base_offset(adapter, start);
+ if (addr)
+ goto noremap;
+
+ mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);
+
+ mem_ptr = ioremap(mem_base, PAGE_SIZE);
+ if (mem_ptr == NULL) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ addr = mem_ptr + (start & (PAGE_SIZE - 1));
+
+noremap:
+ if (op == 0) /* read */
+ *data = readq(addr);
+ else /* write */
+ writeq(*data, addr);
+
+unlock:
+ mutex_unlock(&adapter->ahw.mem_lock);
+
+ if (mem_ptr)
+ iounmap(mem_ptr);
+ return ret;
+}
+
+#define MAX_CTL_CHECK 1000
+
+int
+qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
+ u64 off, u64 data)
+{
+ int i, j, ret;
+ u32 temp, off8;
+ u64 stride;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* P3 onward, test agent base for MIU and SIU is same */
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX_P3)) {
+ mem_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
+ mem_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
+ return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);
+
+ return -EIO;
+
+correct:
+ stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
+
+ off8 = off & ~(stride-1);
+
+ mutex_lock(&adapter->ahw.mem_lock);
+
+ writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+
+ i = 0;
+ if (stride == 16) {
+ writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE),
+ (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ ret = -EIO;
+ goto done;
+ }
+
+ i = (off & 0xf) ? 0 : 2;
+ writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
+ mem_crb + MIU_TEST_AGT_WRDATA(i));
+ writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
+ mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+ i = (off & 0xf) ? 2 : 0;
+ }
+
+ writel(data & 0xffffffff,
+ mem_crb + MIU_TEST_AGT_WRDATA(i));
+ writel((data >> 32) & 0xffffffff,
+ mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+
+ writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
+ (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to write through agent\n");
+ ret = -EIO;
+ } else
+ ret = 0;
+
+done:
+ mutex_unlock(&adapter->ahw.mem_lock);
+
+ return ret;
+}
+
+int
+qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
+ u64 off, u64 *data)
+{
+ int j, ret;
+ u32 temp, off8;
+ u64 val, stride;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* P3 onward, test agent base for MIU and SIU is same */
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX_P3)) {
+ mem_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
+ mem_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
+ return qlcnic_pci_mem_access_direct(adapter,
+ off, data, 0);
+ }
+
+ return -EIO;
+
+correct:
+ stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
+
+ off8 = off & ~(stride-1);
+
+ mutex_lock(&adapter->ahw.mem_lock);
+
+ writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+ writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to read through agent\n");
+ ret = -EIO;
+ } else {
+ off8 = MIU_TEST_AGT_RDDATA_LO;
+ if ((stride == 16) && (off & 0xf))
+ off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
+
+ temp = readl(mem_crb + off8 + 4);
+ val = (u64)temp << 32;
+ val |= readl(mem_crb + off8);
+ *data = val;
+ ret = 0;
+ }
+
+ mutex_unlock(&adapter->ahw.mem_lock);
+
+ return ret;
+}
+
+int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
+{
+ int offset, board_type, magic;
+ struct pci_dev *pdev = adapter->pdev;
+
+ offset = QLCNIC_FW_MAGIC_OFFSET;
+ if (qlcnic_rom_fast_read(adapter, offset, &magic))
+ return -EIO;
+
+ if (magic != QLCNIC_BDINFO_MAGIC) {
+ dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
+ magic);
+ return -EIO;
+ }
+
+ offset = QLCNIC_BRDTYPE_OFFSET;
+ if (qlcnic_rom_fast_read(adapter, offset, &board_type))
+ return -EIO;
+
+ adapter->ahw.board_type = board_type;
+
+ if (board_type == QLCNIC_BRDTYPE_P3_4_GB_MM) {
+ u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
+ if ((gpio & 0x8000) == 0)
+ board_type = QLCNIC_BRDTYPE_P3_10G_TP;
+ }
+
+ switch (board_type) {
+ case QLCNIC_BRDTYPE_P3_HMEZ:
+ case QLCNIC_BRDTYPE_P3_XG_LOM:
+ case QLCNIC_BRDTYPE_P3_10G_CX4:
+ case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
+ case QLCNIC_BRDTYPE_P3_IMEZ:
+ case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
+ case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
+ case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
+ case QLCNIC_BRDTYPE_P3_10G_XFP:
+ case QLCNIC_BRDTYPE_P3_10000_BASE_T:
+ adapter->ahw.port_type = QLCNIC_XGBE;
+ break;
+ case QLCNIC_BRDTYPE_P3_REF_QG:
+ case QLCNIC_BRDTYPE_P3_4_GB:
+ case QLCNIC_BRDTYPE_P3_4_GB_MM:
+ adapter->ahw.port_type = QLCNIC_GBE;
+ break;
+ case QLCNIC_BRDTYPE_P3_10G_TP:
+ adapter->ahw.port_type = (adapter->portnum < 2) ?
+ QLCNIC_XGBE : QLCNIC_GBE;
+ break;
+ default:
+ dev_err(&pdev->dev, "unknown board type %x\n", board_type);
+ adapter->ahw.port_type = QLCNIC_XGBE;
+ break;
+ }
+
+ return 0;
+}
+
+int
+qlcnic_wol_supported(struct qlcnic_adapter *adapter)
+{
+ u32 wol_cfg;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ if (wol_cfg & (1UL << adapter->portnum)) {
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ if (wol_cfg & (1 << adapter->portnum))
+ return 1;
+ }
+
+ return 0;
+}
+
+int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+{
+ struct qlcnic_nic_req req;
+ int rv;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64((u64)rate << 32);
+ req.words[1] = cpu_to_le64(state);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv)
+ dev_err(&adapter->pdev->dev, "LED configuration failed.\n");
+
+ return rv;
+}
+
+static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u32 flag)
+{
+ struct qlcnic_nic_req req;
+ int rv;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+ req.words[0] = cpu_to_le64(flag);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv)
+ dev_err(&adapter->pdev->dev,
+ "%sting loopback mode failed.\n",
+ flag ? "Set" : "Reset");
+ return rv;
+}
+
+int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_set_fw_loopback(adapter, 1))
+ return -EIO;
+
+ if (qlcnic_nic_set_promisc(adapter,
+ VPORT_MISS_MODE_ACCEPT_ALL)) {
+ qlcnic_set_fw_loopback(adapter, 0);
+ return -EIO;
+ }
+
+ msleep(1000);
+ return 0;
+}
+
+void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter)
+{
+ int mode = VPORT_MISS_MODE_DROP;
+ struct net_device *netdev = adapter->netdev;
+
+ qlcnic_set_fw_loopback(adapter, 0);
+
+ if (netdev->flags & IFF_PROMISC)
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ else if (netdev->flags & IFF_ALLMULTI)
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+
+ qlcnic_nic_set_promisc(adapter, mode);
+}
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
new file mode 100644
index 000000000000..ea00ab4d4feb
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -0,0 +1,1541 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include "qlcnic.h"
+
+struct crb_addr_pair {
+ u32 addr;
+ u32 data;
+};
+
+#define QLCNIC_MAX_CRB_XFORM 60
+static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];
+
+#define crb_addr_transform(name) \
+ (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \
+ QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20)
+
+#define QLCNIC_ADDR_ERROR (0xffffffff)
+
+static void
+qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring);
+
+static void crb_addr_transform_setup(void)
+{
+ crb_addr_transform(XDMA);
+ crb_addr_transform(TIMR);
+ crb_addr_transform(SRE);
+ crb_addr_transform(SQN3);
+ crb_addr_transform(SQN2);
+ crb_addr_transform(SQN1);
+ crb_addr_transform(SQN0);
+ crb_addr_transform(SQS3);
+ crb_addr_transform(SQS2);
+ crb_addr_transform(SQS1);
+ crb_addr_transform(SQS0);
+ crb_addr_transform(RPMX7);
+ crb_addr_transform(RPMX6);
+ crb_addr_transform(RPMX5);
+ crb_addr_transform(RPMX4);
+ crb_addr_transform(RPMX3);
+ crb_addr_transform(RPMX2);
+ crb_addr_transform(RPMX1);
+ crb_addr_transform(RPMX0);
+ crb_addr_transform(ROMUSB);
+ crb_addr_transform(SN);
+ crb_addr_transform(QMN);
+ crb_addr_transform(QMS);
+ crb_addr_transform(PGNI);
+ crb_addr_transform(PGND);
+ crb_addr_transform(PGN3);
+ crb_addr_transform(PGN2);
+ crb_addr_transform(PGN1);
+ crb_addr_transform(PGN0);
+ crb_addr_transform(PGSI);
+ crb_addr_transform(PGSD);
+ crb_addr_transform(PGS3);
+ crb_addr_transform(PGS2);
+ crb_addr_transform(PGS1);
+ crb_addr_transform(PGS0);
+ crb_addr_transform(PS);
+ crb_addr_transform(PH);
+ crb_addr_transform(NIU);
+ crb_addr_transform(I2Q);
+ crb_addr_transform(EG);
+ crb_addr_transform(MN);
+ crb_addr_transform(MS);
+ crb_addr_transform(CAS2);
+ crb_addr_transform(CAS1);
+ crb_addr_transform(CAS0);
+ crb_addr_transform(CAM);
+ crb_addr_transform(C2C1);
+ crb_addr_transform(C2C0);
+ crb_addr_transform(SMB);
+ crb_addr_transform(OCM0);
+ crb_addr_transform(I2C0);
+}
+
+void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_rx_buffer *rx_buf;
+ int i, ring;
+
+ recv_ctx = &adapter->recv_ctx;
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ for (i = 0; i < rds_ring->num_desc; ++i) {
+ rx_buf = &(rds_ring->rx_buf_arr[i]);
+ if (rx_buf->state == QLCNIC_BUFFER_FREE)
+ continue;
+ pci_unmap_single(adapter->pdev,
+ rx_buf->dma,
+ rds_ring->dma_size,
+ PCI_DMA_FROMDEVICE);
+ if (rx_buf->skb != NULL)
+ dev_kfree_skb_any(rx_buf->skb);
+ }
+ }
+}
+
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_buffer *cmd_buf;
+ struct qlcnic_skb_frag *buffrag;
+ int i, j;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ cmd_buf = tx_ring->cmd_buf_arr;
+ for (i = 0; i < tx_ring->num_desc; i++) {
+ buffrag = cmd_buf->frag_array;
+ if (buffrag->dma) {
+ pci_unmap_single(adapter->pdev, buffrag->dma,
+ buffrag->length, PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+ for (j = 0; j < cmd_buf->frag_count; j++) {
+ buffrag++;
+ if (buffrag->dma) {
+ pci_unmap_page(adapter->pdev, buffrag->dma,
+ buffrag->length,
+ PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+ }
+ if (cmd_buf->skb) {
+ dev_kfree_skb_any(cmd_buf->skb);
+ cmd_buf->skb = NULL;
+ }
+ cmd_buf++;
+ }
+}
+
+void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ recv_ctx = &adapter->recv_ctx;
+
+ if (recv_ctx->rds_rings == NULL)
+ goto skip_rds;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ vfree(rds_ring->rx_buf_arr);
+ rds_ring->rx_buf_arr = NULL;
+ }
+ kfree(recv_ctx->rds_rings);
+
+skip_rds:
+ if (adapter->tx_ring == NULL)
+ return;
+
+ tx_ring = adapter->tx_ring;
+ vfree(tx_ring->cmd_buf_arr);
+ kfree(adapter->tx_ring);
+}
+
+int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_rx_buffer *rx_buf;
+ int ring, i, size;
+
+ struct qlcnic_cmd_buffer *cmd_buf_arr;
+ struct net_device *netdev = adapter->netdev;
+
+ size = sizeof(struct qlcnic_host_tx_ring);
+ tx_ring = kzalloc(size, GFP_KERNEL);
+ if (tx_ring == NULL) {
+ dev_err(&netdev->dev, "failed to allocate tx ring struct\n");
+ return -ENOMEM;
+ }
+ adapter->tx_ring = tx_ring;
+
+ tx_ring->num_desc = adapter->num_txd;
+ tx_ring->txq = netdev_get_tx_queue(netdev, 0);
+
+ cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
+ if (cmd_buf_arr == NULL) {
+ dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
+ return -ENOMEM;
+ }
+ memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
+ tx_ring->cmd_buf_arr = cmd_buf_arr;
+
+ recv_ctx = &adapter->recv_ctx;
+
+ size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
+ rds_ring = kzalloc(size, GFP_KERNEL);
+ if (rds_ring == NULL) {
+ dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
+ return -ENOMEM;
+ }
+ recv_ctx->rds_rings = rds_ring;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ switch (ring) {
+ case RCV_RING_NORMAL:
+ rds_ring->num_desc = adapter->num_rxd;
+ if (adapter->ahw.cut_through) {
+ rds_ring->dma_size =
+ QLCNIC_CT_DEFAULT_RX_BUF_LEN;
+ rds_ring->skb_size =
+ QLCNIC_CT_DEFAULT_RX_BUF_LEN;
+ } else {
+ rds_ring->dma_size =
+ QLCNIC_P3_RX_BUF_MAX_LEN;
+ rds_ring->skb_size =
+ rds_ring->dma_size + NET_IP_ALIGN;
+ }
+ break;
+
+ case RCV_RING_JUMBO:
+ rds_ring->num_desc = adapter->num_jumbo_rxd;
+ rds_ring->dma_size =
+ QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
+
+ rds_ring->skb_size =
+ rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+
+ case RCV_RING_LRO:
+ rds_ring->num_desc = adapter->num_lro_rxd;
+ rds_ring->dma_size = QLCNIC_RX_LRO_BUFFER_LENGTH;
+ rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+
+ }
+ rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *)
+ vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
+ if (rds_ring->rx_buf_arr == NULL) {
+ dev_err(&netdev->dev, "Failed to allocate "
+ "rx buffer ring %d\n", ring);
+ goto err_out;
+ }
+ memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
+ INIT_LIST_HEAD(&rds_ring->free_list);
+ /*
+ * Now go through all of them, set reference handles
+ * and put them in the queues.
+ */
+ rx_buf = rds_ring->rx_buf_arr;
+ for (i = 0; i < rds_ring->num_desc; i++) {
+ list_add_tail(&rx_buf->list,
+ &rds_ring->free_list);
+ rx_buf->ref_handle = i;
+ rx_buf->state = QLCNIC_BUFFER_FREE;
+ rx_buf++;
+ }
+ spin_lock_init(&rds_ring->lock);
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ sds_ring->irq = adapter->msix_entries[ring].vector;
+ sds_ring->adapter = adapter;
+ sds_ring->num_desc = adapter->num_rxd;
+
+ for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
+ INIT_LIST_HEAD(&sds_ring->free_list[i]);
+ }
+
+ return 0;
+
+err_out:
+ qlcnic_free_sw_resources(adapter);
+ return -ENOMEM;
+}
+
+/*
+ * Utility to translate from internal Phantom CRB address
+ * to external PCI CRB address.
+ */
+static u32 qlcnic_decode_crb_addr(u32 addr)
+{
+ int i;
+ u32 base_addr, offset, pci_base;
+
+ crb_addr_transform_setup();
+
+ pci_base = QLCNIC_ADDR_ERROR;
+ base_addr = addr & 0xfff00000;
+ offset = addr & 0x000fffff;
+
+ for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) {
+ if (crb_addr_xform[i] == base_addr) {
+ pci_base = i << 20;
+ break;
+ }
+ }
+ if (pci_base == QLCNIC_ADDR_ERROR)
+ return pci_base;
+ else
+ return pci_base + offset;
+}
+
+#define QLCNIC_MAX_ROM_WAIT_USEC 100
+
+static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
+{
+ long timeout = 0;
+ long done = 0;
+
+ cond_resched();
+
+ while (done == 0) {
+ done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
+ done &= 2;
+ if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
+ dev_err(&adapter->pdev->dev,
+				"Timeout reached waiting for rom done\n");
+ return -EIO;
+ }
+ udelay(1);
+ }
+ return 0;
+}
+
+static int do_rom_fast_read(struct qlcnic_adapter *adapter,
+ int addr, int *valp)
+{
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+ if (qlcnic_wait_rom_done(adapter)) {
+ dev_err(&adapter->pdev->dev, "Error waiting for rom done\n");
+ return -EIO;
+ }
+ /* reset abyte_cnt and dummy_byte_cnt */
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0);
+ udelay(10);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+
+ *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA);
+ return 0;
+}
+
+static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size)
+{
+ int addridx;
+ int ret = 0;
+
+ for (addridx = addr; addridx < (addr + size); addridx += 4) {
+ int v;
+ ret = do_rom_fast_read(adapter, addridx, &v);
+ if (ret != 0)
+ break;
+ *(__le32 *)bytes = cpu_to_le32(v);
+ bytes += 4;
+ }
+
+ return ret;
+}
+
+int
+qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size)
+{
+ int ret;
+
+ ret = qlcnic_rom_lock(adapter);
+ if (ret < 0)
+ return ret;
+
+ ret = do_rom_fast_read_words(adapter, addr, bytes, size);
+
+ qlcnic_rom_unlock(adapter);
+ return ret;
+}
+
+int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp)
+{
+ int ret;
+
+ if (qlcnic_rom_lock(adapter) != 0)
+ return -EIO;
+
+ ret = do_rom_fast_read(adapter, addr, valp);
+ qlcnic_rom_unlock(adapter);
+ return ret;
+}
+
+int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
+{
+ int addr, val;
+ int i, n, init_delay;
+ struct crb_addr_pair *buf;
+ unsigned offset;
+ u32 off;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* resetall */
+ qlcnic_rom_lock(adapter);
+ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xffffffff);
+ qlcnic_rom_unlock(adapter);
+
+ if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
+ qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
+ dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n);
+ return -EIO;
+ }
+ offset = n & 0xffffU;
+ n = (n >> 16) & 0xffffU;
+
+ if (n >= 1024) {
+ dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n");
+ return -EIO;
+ }
+
+ buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
+ if (buf == NULL) {
+		dev_err(&pdev->dev, "Unable to allocate memory for rom read.\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < n; i++) {
+ if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
+ qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
+ kfree(buf);
+ return -EIO;
+ }
+
+ buf[i].addr = addr;
+ buf[i].data = val;
+ }
+
+ for (i = 0; i < n; i++) {
+
+ off = qlcnic_decode_crb_addr(buf[i].addr);
+ if (off == QLCNIC_ADDR_ERROR) {
+ dev_err(&pdev->dev, "CRB init value out of range %x\n",
+ buf[i].addr);
+ continue;
+ }
+ off += QLCNIC_PCI_CRBSPACE;
+
+ if (off & 1)
+ continue;
+
+ /* skipping cold reboot MAGIC */
+ if (off == QLCNIC_CAM_RAM(0x1fc))
+ continue;
+ if (off == (QLCNIC_CRB_I2C0 + 0x1c))
+ continue;
+ if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */
+ continue;
+ if (off == (ROMUSB_GLB + 0xa8))
+ continue;
+ if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
+ continue;
+ if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
+ continue;
+ if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
+ continue;
+ if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET)
+ continue;
+ /* skip the function enable register */
+ if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION))
+ continue;
+ if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2))
+ continue;
+ if ((off & 0x0ff00000) == QLCNIC_CRB_SMB)
+ continue;
+
+ init_delay = 1;
+		/*
+		 * After writing this register, HW needs time for CRB
+		 * to quiet down (else crb_window returns 0xffffffff).
+		 */
+ if (off == QLCNIC_ROMUSB_GLB_SW_RESET)
+ init_delay = 1000;
+
+ QLCWR32(adapter, off, buf[i].data);
+
+ msleep(init_delay);
+ }
+ kfree(buf);
+
+ /* p2dn replyCount */
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
+ /* disable_peg_cache 0 & 1*/
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);
+
+ /* peg_clr_all */
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
+ return 0;
+}
+
+static int
+qlcnic_has_mn(struct qlcnic_adapter *adapter)
+{
+ u32 capability, flashed_ver;
+ capability = 0;
+
+ qlcnic_rom_fast_read(adapter,
+ QLCNIC_FW_VERSION_OFFSET, (int *)&flashed_ver);
+ flashed_ver = QLCNIC_DECODE_VERSION(flashed_ver);
+
+ if (flashed_ver >= QLCNIC_VERSION_CODE(4, 0, 220)) {
+
+ capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
+ if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
+ return 1;
+ }
+ return 0;
+}
+
+static
+struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section)
+{
+ u32 i;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ __le32 entries = cpu_to_le32(directory->num_entries);
+
+ for (i = 0; i < entries; i++) {
+
+ __le32 offs = cpu_to_le32(directory->findex) +
+ (i * cpu_to_le32(directory->entry_size));
+ __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
+
+ if (tab_type == section)
+ return (struct uni_table_desc *) &unirom[offs];
+ }
+
+ return NULL;
+}
+
+static int
+qlcnic_set_product_offs(struct qlcnic_adapter *adapter)
+{
+ struct uni_table_desc *ptab_descr;
+ const u8 *unirom = adapter->fw->data;
+ u32 i;
+ __le32 entries;
+ int mn_present = qlcnic_has_mn(adapter);
+
+ ptab_descr = qlcnic_get_table_desc(unirom,
+ QLCNIC_UNI_DIR_SECT_PRODUCT_TBL);
+ if (ptab_descr == NULL)
+ return -1;
+
+ entries = cpu_to_le32(ptab_descr->num_entries);
+nomn:
+ for (i = 0; i < entries; i++) {
+
+ __le32 flags, file_chiprev, offs;
+ u8 chiprev = adapter->ahw.revision_id;
+ u32 flagbit;
+
+ offs = cpu_to_le32(ptab_descr->findex) +
+ (i * cpu_to_le32(ptab_descr->entry_size));
+ flags = cpu_to_le32(*((int *)&unirom[offs] +
+ QLCNIC_UNI_FLAGS_OFF));
+ file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
+ QLCNIC_UNI_CHIP_REV_OFF));
+
+ flagbit = mn_present ? 1 : 2;
+
+ if ((chiprev == file_chiprev) &&
+ ((1ULL << flagbit) & flags)) {
+ adapter->file_prd_off = offs;
+ return 0;
+ }
+ }
+ if (mn_present) {
+ mn_present = 0;
+ goto nomn;
+ }
+ return -1;
+}
+
+static
+struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter,
+ u32 section, u32 idx_offset)
+{
+ const u8 *unirom = adapter->fw->data;
+ int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ idx_offset));
+ struct uni_table_desc *tab_desc;
+ __le32 offs;
+
+ tab_desc = qlcnic_get_table_desc(unirom, section);
+
+ if (tab_desc == NULL)
+ return NULL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * idx);
+
+ return (struct uni_data_desc *)&unirom[offs];
+}
+
+static u8 *
+qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter)
+{
+ u32 offs = QLCNIC_BOOTLD_START;
+
+ if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
+ QLCNIC_UNI_DIR_SECT_BOOTLD,
+ QLCNIC_UNI_BOOTLD_IDX_OFF))->findex);
+
+ return (u8 *)&adapter->fw->data[offs];
+}
+
+static u8 *
+qlcnic_get_fw_offs(struct qlcnic_adapter *adapter)
+{
+ u32 offs = QLCNIC_IMAGE_START;
+
+ if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
+ QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex);
+
+ return (u8 *)&adapter->fw->data[offs];
+}
+
+static __le32
+qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ return cpu_to_le32((qlcnic_get_data_desc(adapter,
+ QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF))->size);
+ else
+ return cpu_to_le32(
+ *(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]);
+}
+
+static __le32
+qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
+{
+ struct uni_data_desc *fw_data_desc;
+ const struct firmware *fw = adapter->fw;
+ __le32 major, minor, sub;
+ const u8 *ver_str;
+ int i, ret;
+
+ if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
+ return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]);
+
+ fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF);
+ ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
+ cpu_to_le32(fw_data_desc->size) - 17;
+
+ for (i = 0; i < 12; i++) {
+ if (!strncmp(&ver_str[i], "REV=", 4)) {
+ ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
+ &major, &minor, &sub);
+ if (ret != 3)
+ return 0;
+ else
+ return major + (minor << 8) + (sub << 16);
+ }
+ }
+
+ return 0;
+}
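
The helpers used around this function (QLCNIC_VERSION_CODE, QLCNIC_DECODE_VERSION, _major/_minor/_build) live in qlcnic.h and are not shown in this hunk; the sketch below only illustrates the packing implied by the return value above and uses EXAMPLE_* names so it is not mistaken for the real macros:

/* Assumed packing (sketch): major in bits 0-7, minor in bits 8-15, and the
 * build/sub number in the upper bits, matching 'major + (minor << 8) + (sub << 16)'.
 */
#define EXAMPLE_VERSION_CODE(a, b, c)	((a) + ((b) << 8) + ((c) << 16))
#define EXAMPLE_MAJOR(v)		((v) & 0xff)
#define EXAMPLE_MINOR(v)		(((v) >> 8) & 0xff)
#define EXAMPLE_BUILD(v)		((v) >> 16)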
+
+static __le32
+qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
+{
+ const struct firmware *fw = adapter->fw;
+ __le32 bios_ver, prd_off = adapter->file_prd_off;
+
+ if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
+ return cpu_to_le32(
+ *(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]);
+
+ bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
+ + QLCNIC_UNI_BIOS_VERSION_OFF));
+
+ return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
+}
+
+int
+qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
+{
+ u32 count, old_count;
+ u32 val, version, major, minor, build;
+ int i, timeout;
+
+ if (adapter->need_fw_reset)
+ return 1;
+
+ /* last attempt had failed */
+ if (QLCRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
+ return 1;
+
+ old_count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+
+ for (i = 0; i < 10; i++) {
+
+ timeout = msleep_interruptible(200);
+ if (timeout) {
+ QLCWR32(adapter, CRB_CMDPEG_STATE,
+ PHAN_INITIALIZE_FAILED);
+ return -EINTR;
+ }
+
+ count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ if (count != old_count)
+ break;
+ }
+
+ /* firmware is dead */
+ if (count == old_count)
+ return 1;
+
+ /* check if we have got newer or different file firmware */
+ if (adapter->fw) {
+
+ val = qlcnic_get_fw_version(adapter);
+
+ version = QLCNIC_DECODE_VERSION(val);
+
+ major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+ if (version > QLCNIC_VERSION_CODE(major, minor, build))
+ return 1;
+ }
+
+ return 0;
+}
+
+static const char *fw_name[] = {
+ QLCNIC_UNIFIED_ROMIMAGE_NAME,
+ QLCNIC_FLASH_ROMIMAGE_NAME,
+};
+
+int
+qlcnic_load_firmware(struct qlcnic_adapter *adapter)
+{
+ u64 *ptr64;
+ u32 i, flashaddr, size;
+ const struct firmware *fw = adapter->fw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ dev_info(&pdev->dev, "loading firmware from %s\n",
+ fw_name[adapter->fw_type]);
+
+ if (fw) {
+ __le64 data;
+
+ size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
+
+ ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter);
+ flashaddr = QLCNIC_BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+
+ size = (__force u32)qlcnic_get_fw_size(adapter) / 8;
+
+ ptr64 = (u64 *)qlcnic_get_fw_offs(adapter);
+ flashaddr = QLCNIC_IMAGE_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+ } else {
+ u64 data;
+ u32 hi, lo;
+
+ size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
+ flashaddr = QLCNIC_BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ if (qlcnic_rom_fast_read(adapter,
+ flashaddr, (int *)&lo) != 0)
+ return -EIO;
+ if (qlcnic_rom_fast_read(adapter,
+ flashaddr + 4, (int *)&hi) != 0)
+ return -EIO;
+
+ data = (((u64)hi << 32) | lo);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+ }
+ msleep(1);
+
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020);
+ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e);
+ return 0;
+}
+
+static int
+qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
+{
+ __le32 val;
+ u32 ver, min_ver, bios, min_size;
+ struct pci_dev *pdev = adapter->pdev;
+ const struct firmware *fw = adapter->fw;
+ u8 fw_type = adapter->fw_type;
+
+ if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
+ if (qlcnic_set_product_offs(adapter))
+ return -EINVAL;
+
+ min_size = QLCNIC_UNI_FW_MIN_SIZE;
+ } else {
+ val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
+ if ((__force u32)val != QLCNIC_BDINFO_MAGIC)
+ return -EINVAL;
+
+ min_size = QLCNIC_FW_MIN_SIZE;
+ }
+
+ if (fw->size < min_size)
+ return -EINVAL;
+
+ val = qlcnic_get_fw_version(adapter);
+
+ min_ver = QLCNIC_VERSION_CODE(4, 0, 216);
+
+ ver = QLCNIC_DECODE_VERSION(val);
+
+ if ((_major(ver) > _QLCNIC_LINUX_MAJOR) || (ver < min_ver)) {
+ dev_err(&pdev->dev,
+ "%s: firmware version %d.%d.%d unsupported\n",
+ fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
+ return -EINVAL;
+ }
+
+ val = qlcnic_get_bios_version(adapter);
+ qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios);
+ if ((__force u32)val != bios) {
+ dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
+ fw_name[fw_type]);
+ return -EINVAL;
+ }
+
+ /* check if flashed firmware is newer */
+ if (qlcnic_rom_fast_read(adapter,
+ QLCNIC_FW_VERSION_OFFSET, (int *)&val))
+ return -EIO;
+
+ val = QLCNIC_DECODE_VERSION(val);
+ if (val > ver) {
+ dev_info(&pdev->dev, "%s: firmware is older than flash\n",
+ fw_name[fw_type]);
+ return -EINVAL;
+ }
+
+ QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
+ return 0;
+}
+
+static void
+qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
+{
+ u8 fw_type;
+
+ switch (adapter->fw_type) {
+ case QLCNIC_UNKNOWN_ROMIMAGE:
+ fw_type = QLCNIC_UNIFIED_ROMIMAGE;
+ break;
+
+ case QLCNIC_UNIFIED_ROMIMAGE:
+ default:
+ fw_type = QLCNIC_FLASH_ROMIMAGE;
+ break;
+ }
+
+ adapter->fw_type = fw_type;
+}
+
+
+
+void qlcnic_request_firmware(struct qlcnic_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int rc;
+
+ adapter->fw_type = QLCNIC_UNKNOWN_ROMIMAGE;
+
+next:
+ qlcnic_get_next_fwtype(adapter);
+
+ if (adapter->fw_type == QLCNIC_FLASH_ROMIMAGE) {
+ adapter->fw = NULL;
+ } else {
+ rc = request_firmware(&adapter->fw,
+ fw_name[adapter->fw_type], &pdev->dev);
+ if (rc != 0)
+ goto next;
+
+ rc = qlcnic_validate_firmware(adapter);
+ if (rc != 0) {
+ release_firmware(adapter->fw);
+ msleep(1);
+ goto next;
+ }
+ }
+}
+
+void
+qlcnic_release_firmware(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fw)
+ release_firmware(adapter->fw);
+ adapter->fw = NULL;
+}
+
+int qlcnic_phantom_init(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int retries = 60;
+
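+	/* poll the cmd peg state for up to 30 seconds (60 retries x 500 ms) */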
+ do {
+ val = QLCRD32(adapter, CRB_CMDPEG_STATE);
+
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return 0;
+ case PHAN_INITIALIZE_FAILED:
+ goto out_err;
+ default:
+ break;
+ }
+
+ msleep(500);
+
+ } while (--retries);
+
+ QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+
+out_err:
+ dev_err(&adapter->pdev->dev, "firmware init failed\n");
+ return -EIO;
+}
+
+static int
+qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int retries = 2000;
+
+ do {
+ val = QLCRD32(adapter, CRB_RCVPEG_STATE);
+
+ if (val == PHAN_PEG_RCV_INITIALIZED)
+ return 0;
+
+ msleep(10);
+
+ } while (--retries);
+
+ if (!retries) {
+ dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
+ "complete, state: 0x%x.\n", val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ err = qlcnic_receive_peg_ready(adapter);
+ if (err)
+ return err;
+
+ QLCWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
+ QLCWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
+ QLCWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
+ QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+
+ return err;
+}
+
+static void
+qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
+ struct qlcnic_fw_msg *msg)
+{
+ u32 cable_OUI;
+ u16 cable_len;
+ u16 link_speed;
+ u8 link_status, module, duplex, autoneg;
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->has_link_events = 1;
+
+ cable_OUI = msg->body[1] & 0xffffffff;
+ cable_len = (msg->body[1] >> 32) & 0xffff;
+ link_speed = (msg->body[1] >> 48) & 0xffff;
+
+ link_status = msg->body[2] & 0xff;
+ duplex = (msg->body[2] >> 16) & 0xff;
+ autoneg = (msg->body[2] >> 24) & 0xff;
+
+ module = (msg->body[2] >> 8) & 0xff;
+ if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
+ dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, "
+ "length %d\n", cable_OUI, cable_len);
+ else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
+ dev_info(&netdev->dev, "unsupported cable length %d\n",
+ cable_len);
+
+ qlcnic_advert_link_change(adapter, link_status);
+
+ if (duplex == LINKEVENT_FULL_DUPLEX)
+ adapter->link_duplex = DUPLEX_FULL;
+ else
+ adapter->link_duplex = DUPLEX_HALF;
+
+ adapter->module_type = module;
+ adapter->link_autoneg = autoneg;
+ adapter->link_speed = link_speed;
+}
+
+static void
+qlcnic_handle_fw_message(int desc_cnt, int index,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_fw_msg msg;
+ struct status_desc *desc;
+ int i = 0, opcode;
+
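+	/* a firmware message spans at most four status descriptors (8 x 64-bit words) */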
+ while (desc_cnt > 0 && i < 8) {
+ desc = &sds_ring->desc_head[index];
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
+
+ index = get_next_index(index, sds_ring->num_desc);
+ desc_cnt--;
+ }
+
+ opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
+ switch (opcode) {
+ case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
+ qlcnic_handle_linkevent(sds_ring->adapter, &msg);
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring,
+ struct qlcnic_rx_buffer *buffer)
+{
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct pci_dev *pdev = adapter->pdev;
+
+ buffer->skb = dev_alloc_skb(rds_ring->skb_size);
+ if (!buffer->skb)
+ return -ENOMEM;
+
+ skb = buffer->skb;
+
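+	/* align the IP header of received frames; not needed in cut-through mode */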
+ if (!adapter->ahw.cut_through)
+ skb_reserve(skb, 2);
+
+ dma = pci_map_single(pdev, skb->data,
+ rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(pdev, dma)) {
+ dev_kfree_skb_any(skb);
+ buffer->skb = NULL;
+ return -ENOMEM;
+ }
+
+ buffer->skb = skb;
+ buffer->dma = dma;
+ buffer->state = QLCNIC_BUFFER_BUSY;
+
+ return 0;
+}
+
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum)
+{
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
+ PCI_DMA_FROMDEVICE);
+
+ skb = buffer->skb;
+ if (!skb)
+ goto no_skb;
+
+ if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
+ adapter->stats.csummed++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ skb->dev = adapter->netdev;
+
+ buffer->skb = NULL;
+no_skb:
+ buffer->state = QLCNIC_BUFFER_FREE;
+ return skb;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_process_rcv(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, pkt_offset;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ length = qlcnic_get_sts_totallength(sts_data0);
+ cksum = qlcnic_get_sts_status(sts_data0);
+ pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+ skb->truesize = skb->len + sizeof(struct sk_buff);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ napi_gro_receive(&sds_ring->napi, skb);
+
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
+#define QLC_TCP_HDR_SIZE 20
+#define QLC_TCP_TS_OPTION_SIZE 12
+#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
+
+static struct qlcnic_rx_buffer *
+qlcnic_process_lro(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0, u64 sts_data1)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct iphdr *iph;
+ struct tcphdr *th;
+ bool push, timestamp;
+ int l2_hdr_offset, l4_hdr_offset;
+ int index;
+ u16 lro_length, length, data_offset;
+ u32 seq_number;
+
+	if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_lro_sts_refhandle(sts_data0);
+	if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
+ lro_length = qlcnic_get_lro_sts_length(sts_data0);
+ l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
+ l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
+ push = qlcnic_get_lro_sts_push_flag(sts_data0);
+ seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return buffer;
+
+ if (timestamp)
+ data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
+ else
+ data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
+
+ skb_put(skb, lro_length + data_offset);
+
+ skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
+
+ skb_pull(skb, l2_hdr_offset);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ iph = (struct iphdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ iph->tot_len = htons(length);
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ th->psh = push;
+ th->seq = htonl(seq_number);
+
+ length = skb->len;
+
+ netif_receive_skb(skb);
+
+ adapter->stats.lro_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
+int
+qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct list_head *cur;
+ struct status_desc *desc;
+ struct qlcnic_rx_buffer *rxbuf;
+ u64 sts_data0, sts_data1;
+
+ int count = 0;
+ int opcode, ring, desc_cnt;
+ u32 consumer = sds_ring->consumer;
+
+ while (count < max) {
+ desc = &sds_ring->desc_head[consumer];
+ sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+ if (!(sts_data0 & STATUS_OWNER_HOST))
+ break;
+
+ desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+ opcode = qlcnic_get_sts_opcode(sts_data0);
+
+ switch (opcode) {
+ case QLCNIC_RXPKT_DESC:
+ case QLCNIC_OLD_RXPKT_DESC:
+ case QLCNIC_SYN_OFFLOAD:
+ ring = qlcnic_get_sts_type(sts_data0);
+ rxbuf = qlcnic_process_rcv(adapter, sds_ring,
+ ring, sts_data0);
+ break;
+ case QLCNIC_LRO_DESC:
+ ring = qlcnic_get_lro_sts_type(sts_data0);
+ sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
+ rxbuf = qlcnic_process_lro(adapter, sds_ring,
+ ring, sts_data0, sts_data1);
+ break;
+ case QLCNIC_RESPONSE_DESC:
+ qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
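+			/* fall through: no rx buffer to queue for a response descriptor */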
+ default:
+ goto skip;
+ }
+
+ WARN_ON(desc_cnt > 1);
+
+ if (rxbuf)
+ list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+
+skip:
+ for (; desc_cnt > 0; desc_cnt--) {
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] =
+ cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ }
+ count++;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ struct qlcnic_host_rds_ring *rds_ring =
+ &adapter->recv_ctx.rds_rings[ring];
+
+ if (!list_empty(&sds_ring->free_list[ring])) {
+ list_for_each(cur, &sds_ring->free_list[ring]) {
+ rxbuf = list_entry(cur,
+ struct qlcnic_rx_buffer, list);
+ qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
+ }
+ spin_lock(&rds_ring->lock);
+ list_splice_tail_init(&sds_ring->free_list[ring],
+ &rds_ring->free_list);
+ spin_unlock(&rds_ring->lock);
+ }
+
+ qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
+ }
+
+ if (count) {
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+ }
+
+ return count;
+}
+
+void
+qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
+ struct qlcnic_host_rds_ring *rds_ring)
+{
+ struct rcv_desc *pdesc;
+ struct qlcnic_rx_buffer *buffer;
+ int producer, count = 0;
+ struct list_head *head;
+
+ producer = rds_ring->producer;
+
+ spin_lock(&rds_ring->lock);
+ head = &rds_ring->free_list;
+ while (!list_empty(head)) {
+
+ buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+
+ count++;
+ list_del(&buffer->list);
+
+ /* make a rcv descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+ pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+ spin_unlock(&rds_ring->lock);
+
+ if (count) {
+ rds_ring->producer = producer;
+		writel((producer - 1) & (rds_ring->num_desc - 1),
+ rds_ring->crb_rcv_producer);
+ }
+}
+
+static void
+qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring)
+{
+ struct rcv_desc *pdesc;
+ struct qlcnic_rx_buffer *buffer;
+ int producer, count = 0;
+ struct list_head *head;
+
+ producer = rds_ring->producer;
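+	/* refill from the rx path; skip this round if the ring lock is contended */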
+ if (!spin_trylock(&rds_ring->lock))
+ return;
+
+ head = &rds_ring->free_list;
+ while (!list_empty(head)) {
+
+ buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+
+ count++;
+ list_del(&buffer->list);
+
+ /* make a rcv descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+
+ if (count) {
+ rds_ring->producer = producer;
+ writel((producer - 1) & (rds_ring->num_desc - 1),
+ rds_ring->crb_rcv_producer);
+ }
+ spin_unlock(&rds_ring->lock);
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0)
+{
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, pkt_offset;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ length = qlcnic_get_sts_totallength(sts_data0);
+ cksum = qlcnic_get_sts_status(sts_data0);
+ pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ skb_put(skb, rds_ring->skb_size);
+
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+ skb->truesize = skb->len + sizeof(struct sk_buff);
+
+ if (!qlcnic_check_loopback_buff(skb->data))
+ adapter->diag_cnt++;
+
+ dev_kfree_skb_any(skb);
+
+ return buffer;
+}
+
+void
+qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct status_desc *desc;
+ struct qlcnic_rx_buffer *rxbuf;
+ u64 sts_data0;
+
+ int opcode, ring, desc_cnt;
+ u32 consumer = sds_ring->consumer;
+
+ desc = &sds_ring->desc_head[consumer];
+ sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+ if (!(sts_data0 & STATUS_OWNER_HOST))
+ return;
+
+ desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+ opcode = qlcnic_get_sts_opcode(sts_data0);
+
+ ring = qlcnic_get_sts_type(sts_data0);
+ rxbuf = qlcnic_process_rcv_diag(adapter, sds_ring,
+ ring, sts_data0);
+
+ desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+}
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
new file mode 100644
index 000000000000..665e8e56b6a8
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -0,0 +1,2720 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+
+#include "qlcnic.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/sysfs.h>
+
+MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
+MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
+
+char qlcnic_driver_name[] = "qlcnic";
+static const char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v"
+ QLCNIC_LINUX_VERSIONID;
+
+static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
+
+/* Default to restricted 1G auto-neg mode */
+static int wol_port_mode = 5;
+
+static int use_msi = 1;
+module_param(use_msi, int, 0644);
+MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
+
+static int use_msi_x = 1;
+module_param(use_msi_x, int, 0644);
+MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
+
+static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
+module_param(auto_fw_reset, int, 0644);
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
+
+static int __devinit qlcnic_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+static void __devexit qlcnic_remove(struct pci_dev *pdev);
+static int qlcnic_open(struct net_device *netdev);
+static int qlcnic_close(struct net_device *netdev);
+static void qlcnic_tx_timeout(struct net_device *netdev);
+static void qlcnic_tx_timeout_task(struct work_struct *work);
+static void qlcnic_attach_work(struct work_struct *work);
+static void qlcnic_fwinit_work(struct work_struct *work);
+static void qlcnic_fw_poll_work(struct work_struct *work);
+static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
+ work_func_t func, int delay);
+static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
+static int qlcnic_poll(struct napi_struct *napi, int budget);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void qlcnic_poll_controller(struct net_device *netdev);
+#endif
+
+static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
+
+static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
+static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
+
+static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
+static irqreturn_t qlcnic_intr(int irq, void *data);
+static irqreturn_t qlcnic_msi_intr(int irq, void *data);
+static irqreturn_t qlcnic_msix_intr(int irq, void *data);
+
+static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
+static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
+
+/* PCI Device ID Table */
+#define ENTRY(device) \
+ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
+ .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
+
+#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
+
+static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
+
+void
+qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(tx_ring->producer, tx_ring->crb_cmd_producer);
+
+ if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
+ netif_stop_queue(adapter->netdev);
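+		/* make the queue stop visible before any later ring-state checks */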
+ smp_mb();
+ }
+}
+
+static const u32 msi_tgt_status[8] = {
+ ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
+ ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
+ ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
+ ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
+};
+
+static const
+struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
+
+static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(0, sds_ring->crb_intr_mask);
+}
+
+static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ writel(0x1, sds_ring->crb_intr_mask);
+
+ if (!QLCNIC_IS_MSI_FAMILY(adapter))
+ writel(0xfbff, adapter->tgt_mask_reg);
+}
+
+static int
+qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
+{
+ int size = sizeof(struct qlcnic_host_sds_ring) * count;
+
+ recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
+
+ return (recv_ctx->sds_rings == NULL);
+}
+
+static void
+qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
+{
+	kfree(recv_ctx->sds_rings);
+
+ recv_ctx->sds_rings = NULL;
+}
+
+static int
+qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ return -ENOMEM;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_poll, QLCNIC_NETDEV_WEIGHT);
+ }
+
+ return 0;
+}
+
+static void
+qlcnic_napi_del(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_del(&sds_ring->napi);
+ }
+
+ qlcnic_free_sds_rings(&adapter->recv_ctx);
+}
+
+static void
+qlcnic_napi_enable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ napi_enable(&sds_ring->napi);
+ qlcnic_enable_int(sds_ring);
+ }
+}
+
+static void
+qlcnic_napi_disable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ qlcnic_disable_int(sds_ring);
+ napi_synchronize(&sds_ring->napi);
+ napi_disable(&sds_ring->napi);
+ }
+}
+
+static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
+{
+ memset(&adapter->stats, 0, sizeof(adapter->stats));
+}
+
+static int qlcnic_set_dma_mask(struct qlcnic_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ u64 mask, cmask;
+
+ adapter->pci_using_dac = 0;
+
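+	/* start with a 39-bit DMA mask; qlcnic_update_dma_mask() widens it if the firmware supports more address bits */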
+ mask = DMA_BIT_MASK(39);
+ cmask = mask;
+
+ if (pci_set_dma_mask(pdev, mask) == 0 &&
+ pci_set_consistent_dma_mask(pdev, cmask) == 0) {
+ adapter->pci_using_dac = 1;
+ return 0;
+ }
+
+ return -EIO;
+}
+
+/* Update addressable range if firmware supports it */
+static int
+qlcnic_update_dma_mask(struct qlcnic_adapter *adapter)
+{
+ int change, shift, err;
+ u64 mask, old_mask, old_cmask;
+ struct pci_dev *pdev = adapter->pdev;
+
+ change = 0;
+
+ shift = QLCRD32(adapter, CRB_DMA_SHIFT);
+ if (shift > 32)
+ return 0;
+
+ if (shift > 9)
+ change = 1;
+
+ if (change) {
+ old_mask = pdev->dma_mask;
+ old_cmask = pdev->dev.coherent_dma_mask;
+
+ mask = DMA_BIT_MASK(32+shift);
+
+ err = pci_set_dma_mask(pdev, mask);
+ if (err)
+ goto err_out;
+
+ err = pci_set_consistent_dma_mask(pdev, mask);
+ if (err)
+ goto err_out;
+ dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
+ }
+
+ return 0;
+
+err_out:
+ pci_set_dma_mask(pdev, old_mask);
+ pci_set_consistent_dma_mask(pdev, old_cmask);
+ return err;
+}
+
+static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
+{
+ u32 val, data;
+
+ val = adapter->ahw.board_type;
+ if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
+ (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
+ if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
+ data = QLCNIC_PORT_MODE_802_3_AP;
+ QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+ } else if (port_mode == QLCNIC_PORT_MODE_XG) {
+ data = QLCNIC_PORT_MODE_XG;
+ QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+ } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
+ data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
+ QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+ } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
+ data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
+ QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+ } else {
+ data = QLCNIC_PORT_MODE_AUTO_NEG;
+ QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+ }
+
+ if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
+ (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
+ (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
+ (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
+ wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
+ }
+ QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
+ }
+}
+
+static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
+{
+ u32 control;
+ int pos;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_dword(pdev, pos, &control);
+ if (enable)
+ control |= PCI_MSIX_FLAGS_ENABLE;
+ else
+ control = 0;
+ pci_write_config_dword(pdev, pos, control);
+ }
+}
+
+static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ adapter->msix_entries[i].entry = i;
+}
+
+static int
+qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
+{
+ int i;
+ unsigned char *p;
+ u64 mac_addr;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ if (qlcnic_get_mac_addr(adapter, &mac_addr) != 0)
+ return -EIO;
+
+ p = (unsigned char *)&mac_addr;
+ for (i = 0; i < 6; i++)
+ netdev->dev_addr[i] = *(p + 5 - i);
+
+ memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
+ memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
+
+ /* set station address */
+
+ if (!is_valid_ether_addr(netdev->perm_addr))
+ dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
+ netdev->dev_addr);
+
+ return 0;
+}
+
+static int qlcnic_set_mac(struct net_device *netdev, void *p)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ if (netif_running(netdev)) {
+ netif_device_detach(netdev);
+ qlcnic_napi_disable(adapter);
+ }
+
+ memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ qlcnic_set_multi(adapter->netdev);
+
+ if (netif_running(netdev)) {
+ netif_device_attach(netdev);
+ qlcnic_napi_enable(adapter);
+ }
+ return 0;
+}
+
+static const struct net_device_ops qlcnic_netdev_ops = {
+ .ndo_open = qlcnic_open,
+ .ndo_stop = qlcnic_close,
+ .ndo_start_xmit = qlcnic_xmit_frame,
+ .ndo_get_stats = qlcnic_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_multicast_list = qlcnic_set_multi,
+ .ndo_set_mac_address = qlcnic_set_mac,
+ .ndo_change_mtu = qlcnic_change_mtu,
+ .ndo_tx_timeout = qlcnic_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = qlcnic_poll_controller,
+#endif
+};
+
+static void
+qlcnic_setup_intr(struct qlcnic_adapter *adapter)
+{
+ const struct qlcnic_legacy_intr_set *legacy_intrp;
+ struct pci_dev *pdev = adapter->pdev;
+ int err, num_msix;
+
+ if (adapter->rss_supported) {
+ num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
+ MSIX_ENTRIES_PER_ADAPTER : 2;
+ } else
+ num_msix = 1;
+
+ adapter->max_sds_rings = 1;
+
+ adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
+
+ legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
+
+ adapter->int_vec_bit = legacy_intrp->int_vec_bit;
+ adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
+ legacy_intrp->tgt_status_reg);
+ adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
+ legacy_intrp->tgt_mask_reg);
+ adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
+
+ adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
+ ISR_INT_STATE_REG);
+
+ qlcnic_set_msix_bit(pdev, 0);
+
+ if (adapter->msix_supported) {
+
+ qlcnic_init_msix_entries(adapter, num_msix);
+ err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
+ if (err == 0) {
+ adapter->flags |= QLCNIC_MSIX_ENABLED;
+ qlcnic_set_msix_bit(pdev, 1);
+
+ if (adapter->rss_supported)
+ adapter->max_sds_rings = num_msix;
+
+ dev_info(&pdev->dev, "using msi-x interrupts\n");
+ return;
+ }
+
+ if (err > 0)
+ pci_disable_msix(pdev);
+
+ /* fall through for msi */
+ }
+
+ if (use_msi && !pci_enable_msi(pdev)) {
+ adapter->flags |= QLCNIC_MSI_ENABLED;
+ adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
+ msi_tgt_status[adapter->ahw.pci_func]);
+ dev_info(&pdev->dev, "using msi interrupts\n");
+ adapter->msix_entries[0].vector = pdev->irq;
+ return;
+ }
+
+ dev_info(&pdev->dev, "using legacy interrupts\n");
+ adapter->msix_entries[0].vector = pdev->irq;
+}
+
+static void
+qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
+{
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ pci_disable_msix(adapter->pdev);
+ if (adapter->flags & QLCNIC_MSI_ENABLED)
+ pci_disable_msi(adapter->pdev);
+}
+
+static void
+qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw.pci_base0 != NULL)
+ iounmap(adapter->ahw.pci_base0);
+}
+
+static int
+qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
+{
+ void __iomem *mem_ptr0 = NULL;
+ resource_size_t mem_base;
+ unsigned long mem_len, pci_len0 = 0;
+
+ struct pci_dev *pdev = adapter->pdev;
+ int pci_func = adapter->ahw.pci_func;
+
+ /*
+ * Set the CRB window to invalid. If any register in window 0 is
+ * accessed it should set the window to 0 and then reset it to 1.
+ */
+ adapter->ahw.crb_win = -1;
+ adapter->ahw.ocm_win = -1;
+
+ /* remap phys address */
+ mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
+ mem_len = pci_resource_len(pdev, 0);
+
+ if (mem_len == QLCNIC_PCI_2MB_SIZE) {
+
+ mem_ptr0 = pci_ioremap_bar(pdev, 0);
+ if (mem_ptr0 == NULL) {
+ dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+ return -EIO;
+ }
+ pci_len0 = mem_len;
+ } else {
+ return -EIO;
+ }
+
+ dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
+
+ adapter->ahw.pci_base0 = mem_ptr0;
+ adapter->ahw.pci_len0 = pci_len0;
+
+ adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
+
+ return 0;
+}
+
+static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int i, found = 0;
+
+ for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
+ if (qlcnic_boards[i].vendor == pdev->vendor &&
+ qlcnic_boards[i].device == pdev->device &&
+ qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
+ qlcnic_boards[i].sub_device == pdev->subsystem_device) {
+ strcpy(name, qlcnic_boards[i].short_name);
+ found = 1;
+ break;
+ }
+
+ }
+
+ if (!found)
+ name = "Unknown";
+}
+
+static void
+qlcnic_check_options(struct qlcnic_adapter *adapter)
+{
+ u32 fw_major, fw_minor, fw_build;
+ char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
+ char serial_num[32];
+ int i, offset, val;
+ int *ptr32;
+ struct pci_dev *pdev = adapter->pdev;
+
+ adapter->driver_mismatch = 0;
+
+ ptr32 = (int *)&serial_num;
+ offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
+ for (i = 0; i < 8; i++) {
+ if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
+ dev_err(&pdev->dev, "error reading board info\n");
+ adapter->driver_mismatch = 1;
+ return;
+ }
+ ptr32[i] = cpu_to_le32(val);
+ offset += sizeof(u32);
+ }
+
+ fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+ adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+ if (adapter->portnum == 0) {
+ get_brd_name(adapter, brd_name);
+
+ pr_info("%s: %s Board Chip rev 0x%x\n",
+ module_name(THIS_MODULE),
+ brd_name, adapter->ahw.revision_id);
+ }
+
+ if (adapter->fw_version < QLCNIC_VERSION_CODE(3, 4, 216)) {
+ adapter->driver_mismatch = 1;
+ dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
+ fw_major, fw_minor, fw_build);
+ return;
+ }
+
+ i = QLCRD32(adapter, QLCNIC_SRE_MISC);
+ adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
+
+ dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
+ fw_major, fw_minor, fw_build,
+ adapter->ahw.cut_through ? "cut-through" : "legacy");
+
+ if (adapter->fw_version >= QLCNIC_VERSION_CODE(4, 0, 222))
+ adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
+
+ adapter->flags &= ~QLCNIC_LRO_ENABLED;
+
+ if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ } else if (adapter->ahw.port_type == QLCNIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ }
+
+ adapter->msix_supported = !!use_msi_x;
+ adapter->rss_supported = !!use_msi_x;
+
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+
+ adapter->num_lro_rxd = 0;
+ adapter->max_rds_rings = 2;
+}
+
+static int
+qlcnic_start_firmware(struct qlcnic_adapter *adapter)
+{
+ int val, err, first_boot;
+
+ err = qlcnic_set_dma_mask(adapter);
+ if (err)
+ return err;
+
+ if (!qlcnic_can_start_firmware(adapter))
+ goto wait_init;
+
+ first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
+ if (first_boot == 0x55555555)
+ /* This is the first boot after power up */
+ QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
+
+ qlcnic_request_firmware(adapter);
+
+ err = qlcnic_need_fw_reset(adapter);
+ if (err < 0)
+ goto err_out;
+ if (err == 0)
+ goto wait_init;
+
+ if (first_boot != 0x55555555) {
+ QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
+ qlcnic_pinit_from_rom(adapter);
+ msleep(1);
+ }
+
+ QLCWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
+ QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
+ QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
+
+ qlcnic_set_port_mode(adapter);
+
+ err = qlcnic_load_firmware(adapter);
+ if (err)
+ goto err_out;
+
+ qlcnic_release_firmware(adapter);
+
+ val = (_QLCNIC_LINUX_MAJOR << 16)
+ | ((_QLCNIC_LINUX_MINOR << 8))
+ | (_QLCNIC_LINUX_SUBVERSION);
+ QLCWR32(adapter, CRB_DRIVER_VERSION, val);
+
+wait_init:
+ /* Handshake with the card before we register the devices. */
+ err = qlcnic_phantom_init(adapter);
+ if (err)
+ goto err_out;
+
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
+
+ qlcnic_update_dma_mask(adapter);
+
+ qlcnic_check_options(adapter);
+
+ adapter->need_fw_reset = 0;
+
+ /* fall through and release firmware */
+
+err_out:
+ qlcnic_release_firmware(adapter);
+ return err;
+}
+
+static int
+qlcnic_request_irq(struct qlcnic_adapter *adapter)
+{
+ irq_handler_t handler;
+ struct qlcnic_host_sds_ring *sds_ring;
+ int err, ring;
+
+ unsigned long flags = 0;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+ handler = qlcnic_tmp_intr;
+ if (!QLCNIC_IS_MSI_FAMILY(adapter))
+ flags |= IRQF_SHARED;
+
+ } else {
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ handler = qlcnic_msix_intr;
+ else if (adapter->flags & QLCNIC_MSI_ENABLED)
+ handler = qlcnic_msi_intr;
+ else {
+ flags |= IRQF_SHARED;
+ handler = qlcnic_intr;
+ }
+ }
+ adapter->irq = netdev->irq;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
+ err = request_irq(sds_ring->irq, handler,
+ flags, sds_ring->name, sds_ring);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+qlcnic_free_irq(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ free_irq(sds_ring->irq, sds_ring);
+ }
+}
+
+static void
+qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
+{
+ adapter->coal.flags = QLCNIC_INTR_DEFAULT;
+ adapter->coal.normal.data.rx_time_us =
+ QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
+ adapter->coal.normal.data.rx_packets =
+ QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ adapter->coal.normal.data.tx_time_us =
+ QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
+ adapter->coal.normal.data.tx_packets =
+ QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
+}
+
+static int
+__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EIO;
+
+ qlcnic_set_multi(netdev);
+ qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
+
+ adapter->ahw.linkup = 0;
+
+ if (adapter->max_sds_rings > 1)
+ qlcnic_config_rss(adapter, 1);
+
+ qlcnic_config_intr_coalesce(adapter);
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
+
+ qlcnic_napi_enable(adapter);
+
+ qlcnic_linkevent_request(adapter, 1);
+
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
+ return 0;
+}
+
+/* Usage: During resume and firmware recovery module.*/
+
+static int
+qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int err = 0;
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ err = __qlcnic_up(adapter, netdev);
+ rtnl_unlock();
+
+ return err;
+}
+
+static void
+__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return;
+
+ smp_mb();
+ spin_lock(&adapter->tx_clean_lock);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ qlcnic_free_mac_list(adapter);
+
+ qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
+
+ qlcnic_napi_disable(adapter);
+
+ qlcnic_release_tx_buffers(adapter);
+ spin_unlock(&adapter->tx_clean_lock);
+}
+
+/* Usage: During suspend and firmware recovery module */
+
+static void
+qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ rtnl_lock();
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+ rtnl_unlock();
+}
+
+static int
+qlcnic_attach(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err, ring;
+ struct qlcnic_host_rds_ring *rds_ring;
+
+ if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
+ return 0;
+
+ err = qlcnic_init_firmware(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_napi_add(adapter, netdev);
+ if (err)
+ return err;
+
+ err = qlcnic_alloc_sw_resources(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error in setting sw resources\n");
+ return err;
+ }
+
+ err = qlcnic_alloc_hw_resources(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error in setting hw resources\n");
+ goto err_out_free_sw;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx.rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, ring, rds_ring);
+ }
+
+ err = qlcnic_request_irq(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to setup interrupt\n");
+ goto err_out_free_rxbuf;
+ }
+
+ qlcnic_init_coalesce_defaults(adapter);
+
+ qlcnic_create_sysfs_entries(adapter);
+
+ adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
+ return 0;
+
+err_out_free_rxbuf:
+ qlcnic_release_rx_buffers(adapter);
+ qlcnic_free_hw_resources(adapter);
+err_out_free_sw:
+ qlcnic_free_sw_resources(adapter);
+ return err;
+}
+
+static void
+qlcnic_detach(struct qlcnic_adapter *adapter)
+{
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ qlcnic_remove_sysfs_entries(adapter);
+
+ qlcnic_free_hw_resources(adapter);
+ qlcnic_release_rx_buffers(adapter);
+ qlcnic_free_irq(adapter);
+ qlcnic_napi_del(adapter);
+ qlcnic_free_sw_resources(adapter);
+
+ adapter->is_up = 0;
+}
+
+void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ int ring;
+
+ if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx.sds_rings[ring];
+ qlcnic_disable_int(sds_ring);
+ }
+ }
+
+ qlcnic_detach(adapter);
+
+ adapter->diag_test = 0;
+ adapter->max_sds_rings = max_sds_rings;
+
+ if (qlcnic_attach(adapter))
+ return;
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+ netif_device_attach(netdev);
+}
+
+int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ int ring;
+ int ret;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ adapter->max_sds_rings = 1;
+ adapter->diag_test = test;
+
+ ret = qlcnic_attach(adapter);
+ if (ret)
+ return ret;
+
+ if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx.sds_rings[ring];
+ qlcnic_enable_int(sds_ring);
+ }
+ }
+
+ return 0;
+}
+
+int
+qlcnic_reset_context(struct qlcnic_adapter *adapter)
+{
+ int err = 0;
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (!err)
+ err = __qlcnic_up(adapter, netdev);
+
+ if (err)
+ goto done;
+ }
+
+ netif_device_attach(netdev);
+ }
+
+done:
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return err;
+}
+
+static int
+qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int err;
+ struct pci_dev *pdev = adapter->pdev;
+
+ adapter->rx_csum = 1;
+ adapter->mc_enabled = 0;
+ adapter->max_mc_count = 38;
+
+ netdev->netdev_ops = &qlcnic_netdev_ops;
+ netdev->watchdog_timeo = 2*HZ;
+
+ qlcnic_change_mtu(netdev, netdev->mtu);
+
+ SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
+
+ netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+ netdev->features |= (NETIF_F_GRO);
+ netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+
+ netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
+ netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
+
+ if (adapter->pci_using_dac) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
+ netdev->features |= (NETIF_F_HW_VLAN_TX);
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ netdev->features |= NETIF_F_LRO;
+
+ netdev->irq = adapter->msix_entries[0].vector;
+
+ INIT_WORK(&adapter->tx_timeout_task, qlcnic_tx_timeout_task);
+
+ if (qlcnic_read_mac_addr(adapter))
+ dev_warn(&pdev->dev, "failed to read mac addr\n");
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register net device\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int __devinit
+qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev = NULL;
+ struct qlcnic_adapter *adapter = NULL;
+ int err;
+ int pci_func_id = PCI_FUNC(pdev->devfn);
+ uint8_t revision_id;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ err = -ENODEV;
+ goto err_out_disable_pdev;
+ }
+
+ err = pci_request_regions(pdev, qlcnic_driver_name);
+ if (err)
+ goto err_out_disable_pdev;
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
+ if (!netdev) {
+ dev_err(&pdev->dev, "failed to allocate net_device\n");
+ err = -ENOMEM;
+ goto err_out_free_res;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->ahw.pci_func = pci_func_id;
+
+ revision_id = pdev->revision;
+ adapter->ahw.revision_id = revision_id;
+
+ rwlock_init(&adapter->ahw.crb_lock);
+ mutex_init(&adapter->ahw.mem_lock);
+
+ spin_lock_init(&adapter->tx_clean_lock);
+ INIT_LIST_HEAD(&adapter->mac_list);
+
+ err = qlcnic_setup_pci_map(adapter);
+ if (err)
+ goto err_out_free_netdev;
+
+ /* This will be reset for mezz cards */
+ adapter->portnum = pci_func_id;
+
+ err = qlcnic_get_board_info(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error getting board config info.\n");
+ goto err_out_iounmap;
+ }
+
+ err = qlcnic_start_firmware(adapter);
+ if (err)
+ goto err_out_decr_ref;
+
+ /*
+ * See if the firmware gave us a virtual-physical port mapping.
+ */
+ adapter->physical_port = adapter->portnum;
+
+ qlcnic_clear_stats(adapter);
+
+ qlcnic_setup_intr(adapter);
+
+ err = qlcnic_setup_netdev(adapter, netdev);
+ if (err)
+ goto err_out_disable_msi;
+
+ pci_set_drvdata(pdev, adapter);
+
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+
+ switch (adapter->ahw.port_type) {
+ case QLCNIC_GBE:
+ dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ case QLCNIC_XGBE:
+ dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ }
+
+ qlcnic_create_diag_entries(adapter);
+
+ return 0;
+
+err_out_disable_msi:
+ qlcnic_teardown_intr(adapter);
+
+err_out_decr_ref:
+ qlcnic_clr_all_drv_state(adapter);
+
+err_out_iounmap:
+ qlcnic_cleanup_pci_map(adapter);
+
+err_out_free_netdev:
+ free_netdev(netdev);
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_disable_pdev:
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void __devexit qlcnic_remove(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *netdev;
+
+ adapter = pci_get_drvdata(pdev);
+ if (adapter == NULL)
+ return;
+
+ netdev = adapter->netdev;
+
+ qlcnic_cancel_fw_work(adapter);
+
+ unregister_netdev(netdev);
+
+ cancel_work_sync(&adapter->tx_timeout_task);
+
+ qlcnic_detach(adapter);
+
+ qlcnic_clr_all_drv_state(adapter);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ qlcnic_teardown_intr(adapter);
+
+ qlcnic_remove_diag_entries(adapter);
+
+ qlcnic_cleanup_pci_map(adapter);
+
+ qlcnic_release_firmware(adapter);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ free_netdev(netdev);
+}
+
+static int __qlcnic_shutdown(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int retval;
+
+ netif_device_detach(netdev);
+
+ qlcnic_cancel_fw_work(adapter);
+
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ cancel_work_sync(&adapter->tx_timeout_task);
+
+ qlcnic_detach(adapter);
+
+ qlcnic_clr_all_drv_state(adapter);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ if (qlcnic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
+
+ return 0;
+}
+
+static void qlcnic_shutdown(struct pci_dev *pdev)
+{
+ if (__qlcnic_shutdown(pdev))
+ return;
+
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int
+qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int retval;
+
+ retval = __qlcnic_shutdown(pdev);
+ if (retval)
+ return retval;
+
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+
+static int
+qlcnic_resume(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ adapter->ahw.crb_win = -1;
+ adapter->ahw.ocm_win = -1;
+
+ err = qlcnic_start_firmware(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to start firmware\n");
+ return err;
+ }
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (err)
+ goto err_out;
+
+ err = qlcnic_up(adapter, netdev);
+ if (err)
+ goto err_out_detach;
+
+ qlcnic_config_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+ return 0;
+
+err_out_detach:
+ qlcnic_detach(adapter);
+err_out:
+ qlcnic_clr_all_drv_state(adapter);
+ return err;
+}
+#endif
+
+static int qlcnic_open(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (adapter->driver_mismatch)
+ return -EIO;
+
+ err = qlcnic_attach(adapter);
+ if (err)
+ return err;
+
+ err = __qlcnic_up(adapter, netdev);
+ if (err)
+ goto err_out;
+
+ netif_start_queue(netdev);
+
+ return 0;
+
+err_out:
+ qlcnic_detach(adapter);
+ return err;
+}
+
+/*
+ * qlcnic_close - Disables a network interface entry point
+ */
+static int qlcnic_close(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ __qlcnic_down(adapter, netdev);
+ return 0;
+}
+
+static void
+qlcnic_tso_check(struct net_device *netdev,
+ struct qlcnic_host_tx_ring *tx_ring,
+ struct cmd_desc_type0 *first_desc,
+ struct sk_buff *skb)
+{
+ u8 opcode = TX_ETHER_PKT;
+ __be16 protocol = skb->protocol;
+ u16 flags = 0, vid = 0;
+ u32 producer;
+ int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
+ struct cmd_desc_type0 *hwdesc;
+ struct vlan_ethhdr *vh;
+
+ if (protocol == cpu_to_be16(ETH_P_8021Q)) {
+
+ vh = (struct vlan_ethhdr *)skb->data;
+ protocol = vh->h_vlan_encapsulated_proto;
+ flags = FLAGS_VLAN_TAGGED;
+
+ } else if (vlan_tx_tag_present(skb)) {
+
+ flags = FLAGS_VLAN_OOB;
+ vid = vlan_tx_tag_get(skb);
+ qlcnic_set_tx_vlan_tci(first_desc, vid);
+ vlan_oob = 1;
+ }
+
+ if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
+ skb_shinfo(skb)->gso_size > 0) {
+
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ first_desc->total_hdr_length = hdr_len;
+ if (vlan_oob) {
+ first_desc->total_hdr_length += VLAN_HLEN;
+ first_desc->tcp_hdr_offset = VLAN_HLEN;
+ first_desc->ip_hdr_offset = VLAN_HLEN;
+ /* Only in case of TSO on vlan device */
+ flags |= FLAGS_VLAN_TAGGED;
+ }
+
+ opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
+ TX_TCP_LSO6 : TX_TCP_LSO;
+ tso = 1;
+
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 l4proto;
+
+ if (protocol == cpu_to_be16(ETH_P_IP)) {
+ l4proto = ip_hdr(skb)->protocol;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = TX_TCP_PKT;
+ else if (l4proto == IPPROTO_UDP)
+ opcode = TX_UDP_PKT;
+ } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
+ l4proto = ipv6_hdr(skb)->nexthdr;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = TX_TCPV6_PKT;
+ else if (l4proto == IPPROTO_UDP)
+ opcode = TX_UDPV6_PKT;
+ }
+ }
+
+ first_desc->tcp_hdr_offset += skb_transport_offset(skb);
+ first_desc->ip_hdr_offset += skb_network_offset(skb);
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+ if (!tso)
+ return;
+
+ /* For LSO, we need to copy the MAC/IP/TCP headers into
+ * the descriptor ring
+ */
+ producer = tx_ring->producer;
+ copied = 0;
+ offset = 2;
+
+ if (vlan_oob) {
+ /* Create a TSO vlan header template for firmware */
+
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
+ hdr_len + VLAN_HLEN);
+
+ vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
+ skb_copy_from_linear_data(skb, vh, 12);
+ vh->h_vlan_proto = htons(ETH_P_8021Q);
+ vh->h_vlan_TCI = htons(vid);
+ skb_copy_from_linear_data_offset(skb, 12,
+ (char *)vh + 16, copy_len - 16);
+
+ copied = copy_len - VLAN_HLEN;
+ offset = 0;
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ while (copied < hdr_len) {
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
+ (hdr_len - copied));
+
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ skb_copy_from_linear_data_offset(skb, copied,
+ (char *)hwdesc + offset, copy_len);
+
+ copied += copy_len;
+ offset = 0;
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ tx_ring->producer = producer;
+ barrier();
+}
+
+static int
+qlcnic_map_tx_skb(struct pci_dev *pdev,
+ struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
+{
+ struct qlcnic_skb_frag *nf;
+ struct skb_frag_struct *frag;
+ int i, nr_frags;
+ dma_addr_t map;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ nf = &pbuf->frag_array[0];
+
+ map = pci_map_single(pdev, skb->data,
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, map))
+ goto out_err;
+
+ nf->dma = map;
+ nf->length = skb_headlen(skb);
+
+ for (i = 0; i < nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ nf = &pbuf->frag_array[i+1];
+
+ map = pci_map_page(pdev, frag->page, frag->page_offset,
+ frag->size, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, map))
+ goto unwind;
+
+ nf->dma = map;
+ nf->length = frag->size;
+ }
+
+ return 0;
+
+unwind:
+ while (--i >= 0) {
+ nf = &pbuf->frag_array[i+1];
+ pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ }
+
+ nf = &pbuf->frag_array[0];
+ pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+
+out_err:
+ return -ENOMEM;
+}
+
+static inline void
+qlcnic_clear_cmddesc(u64 *desc)
+{
+ desc[0] = 0ULL;
+ desc[2] = 0ULL;
+}
+
+netdev_tx_t
+qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+ struct qlcnic_cmd_buffer *pbuf;
+ struct qlcnic_skb_frag *buffrag;
+ struct cmd_desc_type0 *hwdesc, *first_desc;
+ struct pci_dev *pdev;
+ int i, k;
+
+ u32 producer;
+ int frag_count, no_of_desc;
+ u32 num_txd = tx_ring->num_desc;
+
+ frag_count = skb_shinfo(skb)->nr_frags + 1;
+
+	/* 4 fragments per cmd descriptor */
+ no_of_desc = (frag_count + 3) >> 2;
+
+ if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ producer = tx_ring->producer;
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+
+ pdev = adapter->pdev;
+
+ if (qlcnic_map_tx_skb(pdev, skb, pbuf))
+ goto drop_packet;
+
+ pbuf->skb = skb;
+ pbuf->frag_count = frag_count;
+
+ first_desc = hwdesc = &tx_ring->desc_head[producer];
+ qlcnic_clear_cmddesc((u64 *)hwdesc);
+
+ qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
+ qlcnic_set_tx_port(first_desc, adapter->portnum);
+
+ for (i = 0; i < frag_count; i++) {
+
+ k = i % 4;
+
+ if ((k == 0) && (i > 0)) {
+ /* move to next desc.*/
+ producer = get_next_index(producer, num_txd);
+ hwdesc = &tx_ring->desc_head[producer];
+ qlcnic_clear_cmddesc((u64 *)hwdesc);
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+ }
+
+ buffrag = &pbuf->frag_array[i];
+
+ hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
+ switch (k) {
+ case 0:
+ hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
+ break;
+ case 1:
+ hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
+ break;
+ case 2:
+ hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
+ break;
+ case 3:
+ hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
+ break;
+ }
+ }
+
+ tx_ring->producer = get_next_index(producer, num_txd);
+
+ qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
+
+ qlcnic_update_cmd_producer(adapter, tx_ring);
+
+ adapter->stats.txbytes += skb->len;
+ adapter->stats.xmitcalled++;
+
+ return NETDEV_TX_OK;
+
+drop_packet:
+ adapter->stats.txdropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 temp, temp_state, temp_val;
+ int rv = 0;
+
+ temp = QLCRD32(adapter, CRB_TEMP_STATE);
+
+ temp_state = qlcnic_get_temp_state(temp);
+ temp_val = qlcnic_get_temp_val(temp);
+
+ if (temp_state == QLCNIC_TEMP_PANIC) {
+ dev_err(&netdev->dev,
+ "Device temperature %d degrees C exceeds"
+ " maximum allowed. Hardware has been shut down.\n",
+ temp_val);
+ rv = 1;
+ } else if (temp_state == QLCNIC_TEMP_WARN) {
+ if (adapter->temp == QLCNIC_TEMP_NORMAL) {
+ dev_err(&netdev->dev,
+ "Device temperature %d degrees C "
+ "exceeds operating range."
+ " Immediate action needed.\n",
+ temp_val);
+ }
+ } else {
+ if (adapter->temp == QLCNIC_TEMP_WARN) {
+ dev_info(&netdev->dev,
+ "Device temperature is now %d degrees C"
+ " in normal range.\n", temp_val);
+ }
+ }
+ adapter->temp = temp_state;
+ return rv;
+}
+
+void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (adapter->ahw.linkup && !linkup) {
+ dev_info(&netdev->dev, "NIC Link is down\n");
+ adapter->ahw.linkup = 0;
+ if (netif_running(netdev)) {
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ } else if (!adapter->ahw.linkup && linkup) {
+ dev_info(&netdev->dev, "NIC Link is up\n");
+ adapter->ahw.linkup = 1;
+ if (netif_running(netdev)) {
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ }
+ }
+}
+
+static void qlcnic_tx_timeout(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ return;
+
+ dev_err(&netdev->dev, "transmit timeout, resetting.\n");
+ schedule_work(&adapter->tx_timeout_task);
+}
+
+static void qlcnic_tx_timeout_task(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter =
+ container_of(work, struct qlcnic_adapter, tx_timeout_task);
+
+ if (!netif_running(adapter->netdev))
+ return;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return;
+
+ if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
+ goto request_reset;
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+	if (!qlcnic_reset_context(adapter)) {
+		adapter->netdev->trans_start = jiffies;
+		return;
+	}
+
+	/* context reset failed, fall through for fw reset */
+
+request_reset:
+ adapter->need_fw_reset = 1;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+}
+
+static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct net_device_stats *stats = &netdev->stats;
+
+ memset(stats, 0, sizeof(*stats));
+
+ stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
+ stats->tx_packets = adapter->stats.xmitfinished;
+ stats->rx_bytes = adapter->stats.rxbytes;
+ stats->tx_bytes = adapter->stats.txbytes;
+ stats->rx_dropped = adapter->stats.rxdropped;
+ stats->tx_dropped = adapter->stats.txdropped;
+
+ return stats;
+}
+
+static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+ u32 status;
+
+ status = readl(adapter->isr_int_vec);
+
+ if (!(status & adapter->int_vec_bit))
+ return IRQ_NONE;
+
+ /* check interrupt state machine, to be sure */
+ status = readl(adapter->crb_int_state_reg);
+ if (!ISR_LEGACY_INT_TRIGGERED(status))
+ return IRQ_NONE;
+
+ writel(0xffffffff, adapter->tgt_status_reg);
+ /* read twice to ensure write is flushed */
+ readl(adapter->isr_int_vec);
+ readl(adapter->isr_int_vec);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ goto done;
+ else if (adapter->flags & QLCNIC_MSI_ENABLED) {
+ writel(0xffffffff, adapter->tgt_status_reg);
+ goto done;
+ }
+
+ if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+done:
+ adapter->diag_cnt++;
+ qlcnic_enable_int(sds_ring);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+ napi_schedule(&sds_ring->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msi_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ /* clear interrupt */
+ writel(0xffffffff, adapter->tgt_status_reg);
+
+ napi_schedule(&sds_ring->napi);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msix_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+
+ napi_schedule(&sds_ring->napi);
+ return IRQ_HANDLED;
+}
+
+static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
+{
+ u32 sw_consumer, hw_consumer;
+ int count = 0, i;
+ struct qlcnic_cmd_buffer *buffer;
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_skb_frag *frag;
+ int done;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
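+	/* if another context is already reclaiming Tx buffers, report completion */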
+ if (!spin_trylock(&adapter->tx_clean_lock))
+ return 1;
+
+ sw_consumer = tx_ring->sw_consumer;
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+
+ while (sw_consumer != hw_consumer) {
+ buffer = &tx_ring->cmd_buf_arr[sw_consumer];
+ if (buffer->skb) {
+ frag = &buffer->frag_array[0];
+ pci_unmap_single(pdev, frag->dma, frag->length,
+ PCI_DMA_TODEVICE);
+ frag->dma = 0ULL;
+ for (i = 1; i < buffer->frag_count; i++) {
+ frag++;
+ pci_unmap_page(pdev, frag->dma, frag->length,
+ PCI_DMA_TODEVICE);
+ frag->dma = 0ULL;
+ }
+
+ adapter->stats.xmitfinished++;
+ dev_kfree_skb_any(buffer->skb);
+ buffer->skb = NULL;
+ }
+
+ sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
+ if (++count >= MAX_STATUS_HANDLE)
+ break;
+ }
+
+ if (count && netif_running(netdev)) {
+ tx_ring->sw_consumer = sw_consumer;
+
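+		/* make the new sw_consumer visible before checking the stopped queue */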
+ smp_mb();
+
+ if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
+ __netif_tx_lock(tx_ring->txq, smp_processor_id());
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
+ netif_wake_queue(netdev);
+ adapter->tx_timeo_cnt = 0;
+ }
+ __netif_tx_unlock(tx_ring->txq);
+ }
+ }
+	/*
+	 * If everything is freed up to the consumer then check if the ring
+	 * is full. If the ring is full then check if more needs to be freed
+	 * and schedule the callback again.
+	 *
+	 * This happens when there are 2 CPUs. One could be freeing and the
+	 * other filling it. If the ring is full when we get out of here and
+	 * the card has already interrupted the host then the host can miss
+	 * the interrupt.
+	 *
+	 * There is still a possible race condition and the host could miss
+	 * an interrupt. The card has to take care of this.
+	 */
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+ done = (sw_consumer == hw_consumer);
+ spin_unlock(&adapter->tx_clean_lock);
+
+ return done;
+}
+
+static int qlcnic_poll(struct napi_struct *napi, int budget)
+{
+ struct qlcnic_host_sds_ring *sds_ring =
+ container_of(napi, struct qlcnic_host_sds_ring, napi);
+
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ int tx_complete;
+ int work_done;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter);
+
+ work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+ if ((work_done < budget) && tx_complete) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_int(sds_ring);
+ }
+
+ return work_done;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void qlcnic_poll_controller(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ disable_irq(adapter->irq);
+ qlcnic_intr(adapter->irq, adapter);
+ enable_irq(adapter->irq);
+}
+#endif
+
+static void
+qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
+{
+ u32 val;
+
+ WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
+ state != QLCNIC_DEV_NEED_QUISCENT);
+
+ if (qlcnic_api_lock(adapter))
+		return;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+
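+	/* each function owns a 4-bit nibble in DRV_STATE: bit 0 acks a reset, bit 1 a quiescent */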
+ if (state == QLCNIC_DEV_NEED_RESET)
+ val |= ((u32)0x1 << (adapter->portnum * 4));
+ else if (state == QLCNIC_DEV_NEED_QUISCENT)
+ val |= ((u32)0x1 << ((adapter->portnum * 4) + 1));
+
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+}
+
+static int
+qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ if (qlcnic_api_lock(adapter))
+ return -EBUSY;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val &= ~((u32)0x3 << (adapter->portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+
+ return 0;
+}
+
+static void
+qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ if (qlcnic_api_lock(adapter))
+ goto err;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+ val &= ~((u32)0x1 << (adapter->portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
+
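+	/* if no function holds a reference any more, mark the device cold */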
+ if (!(val & 0x11111111))
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val &= ~((u32)0x3 << (adapter->portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+err:
+ adapter->fw_fail_cnt = 0;
+ clear_bit(__QLCNIC_START_FW, &adapter->state);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+}
+
+static int
+qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
+{
+ int act, state;
+
+ state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+
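+	/* ready only when every active function has set its ack bit in DRV_STATE */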
+ if (((state & 0x11111111) == (act & 0x11111111)) ||
+ ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
+ return 0;
+ else
+ return 1;
+}
+
+static int
+qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
+{
+ u32 val, prev_state;
+ int cnt = 0;
+ int portnum = adapter->portnum;
+
+ if (qlcnic_api_lock(adapter))
+ return -1;
+
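+	/* register this function in the device ref count before checking the device state */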
+ val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+ if (!(val & ((int)0x1 << (portnum * 4)))) {
+ val |= ((u32)0x1 << (portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
+ } else if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) {
+ goto start_fw;
+ }
+
+ prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ switch (prev_state) {
+ case QLCNIC_DEV_COLD:
+start_fw:
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITALIZING);
+ qlcnic_api_unlock(adapter);
+ return 1;
+
+ case QLCNIC_DEV_READY:
+ qlcnic_api_unlock(adapter);
+ return 0;
+
+ case QLCNIC_DEV_NEED_RESET:
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val |= ((u32)0x1 << (portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ break;
+
+ case QLCNIC_DEV_NEED_QUISCENT:
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val |= ((u32)0x1 << ((portnum * 4) + 1));
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ break;
+
+ case QLCNIC_DEV_FAILED:
+ qlcnic_api_unlock(adapter);
+ return -1;
+ }
+
+ qlcnic_api_unlock(adapter);
+ msleep(1000);
+ while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY) &&
+ ++cnt < 20)
+ msleep(1000);
+
+ if (cnt >= 20)
+ return -1;
+
+ if (qlcnic_api_lock(adapter))
+ return -1;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val &= ~((u32)0x3 << (portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+
+ return 0;
+}
+
+static void
+qlcnic_fwinit_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ int dev_state;
+
+ if (++adapter->fw_wait_cnt > FW_POLL_THRESH)
+ goto err_ret;
+
+ if (test_bit(__QLCNIC_START_FW, &adapter->state)) {
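+		/* this function owns the firmware boot: wait for peer acks, then start */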
+
+ if (qlcnic_check_drv_state(adapter)) {
+ qlcnic_schedule_work(adapter,
+ qlcnic_fwinit_work, FW_POLL_DELAY);
+ return;
+ }
+
+ if (!qlcnic_start_firmware(adapter)) {
+ qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ return;
+ }
+
+ goto err_ret;
+ }
+
+ dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ switch (dev_state) {
+ case QLCNIC_DEV_READY:
+ if (!qlcnic_start_firmware(adapter)) {
+ qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ return;
+ }
+ case QLCNIC_DEV_FAILED:
+ break;
+
+ default:
+ qlcnic_schedule_work(adapter,
+ qlcnic_fwinit_work, 2 * FW_POLL_DELAY);
+ return;
+ }
+
+err_ret:
+ qlcnic_clr_all_drv_state(adapter);
+}
+
+static void
+qlcnic_detach_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ struct net_device *netdev = adapter->netdev;
+ u32 status;
+
+ netif_device_detach(netdev);
+
+ qlcnic_down(adapter, netdev);
+
+ rtnl_lock();
+ qlcnic_detach(adapter);
+ rtnl_unlock();
+
+ status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+
+ if (status & QLCNIC_RCODE_FATAL_ERROR)
+ goto err_ret;
+
+ if (adapter->temp == QLCNIC_TEMP_PANIC)
+ goto err_ret;
+
+ qlcnic_set_drv_state(adapter, adapter->dev_state);
+
+ adapter->fw_wait_cnt = 0;
+
+ qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
+
+ return;
+
+err_ret:
+ qlcnic_clr_all_drv_state(adapter);
+
+}
+
+static void
+qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+
+ if (qlcnic_api_lock(adapter))
+ return;
+
+ state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ if (state != QLCNIC_DEV_INITALIZING && state != QLCNIC_DEV_NEED_RESET) {
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
+ set_bit(__QLCNIC_START_FW, &adapter->state);
+ }
+
+ qlcnic_api_unlock(adapter);
+}
+
+static void
+qlcnic_schedule_work(struct qlcnic_adapter *adapter,
+ work_func_t func, int delay)
+{
+ INIT_DELAYED_WORK(&adapter->fw_work, func);
+ schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
+}
+
+static void
+qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
+{
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ msleep(10);
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+static void
+qlcnic_attach_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (err)
+ goto done;
+
+ err = qlcnic_up(adapter, netdev);
+ if (err) {
+ qlcnic_detach(adapter);
+ goto done;
+ }
+
+ qlcnic_config_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+
+done:
+ adapter->fw_fail_cnt = 0;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ if (!qlcnic_clr_drv_state(adapter))
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
+ FW_POLL_DELAY);
+}
+
+static int
+qlcnic_check_health(struct qlcnic_adapter *adapter)
+{
+ u32 state = 0, heartbit;
+ struct net_device *netdev = adapter->netdev;
+
+ if (qlcnic_check_temp(adapter))
+ goto detach;
+
+ if (adapter->need_fw_reset) {
+ qlcnic_dev_request_reset(adapter);
+ goto detach;
+ }
+
+ state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
+ adapter->need_fw_reset = 1;
+
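+	/* firmware is considered alive while the peg alive counter keeps changing */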
+ heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ if (heartbit != adapter->heartbit) {
+ adapter->heartbit = heartbit;
+ adapter->fw_fail_cnt = 0;
+ if (adapter->need_fw_reset)
+ goto detach;
+ return 0;
+ }
+
+ if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
+ return 0;
+
+ qlcnic_dev_request_reset(adapter);
+
+ clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+
+ dev_info(&netdev->dev, "firmware hang detected\n");
+
+detach:
+ adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
+ QLCNIC_DEV_NEED_RESET;
+
+ if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
+ !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
+
+ return 1;
+}
+
+static void
+qlcnic_fw_poll_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ goto reschedule;
+
+
+ if (qlcnic_check_health(adapter))
+ return;
+
+reschedule:
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+}
+
+static ssize_t
+qlcnic_store_bridged_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long new;
+ int ret = -EINVAL;
+
+ if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
+ goto err_out;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ goto err_out;
+
+ if (strict_strtoul(buf, 2, &new))
+ goto err_out;
+
+ if (!qlcnic_config_bridged_mode(adapter, !!new))
+ ret = len;
+
+err_out:
+ return ret;
+}
+
+static ssize_t
+qlcnic_show_bridged_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int bridged_mode = 0;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
+
+ return sprintf(buf, "%d\n", bridged_mode);
+}
+
+static struct device_attribute dev_attr_bridged_mode = {
+ .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
+ .show = qlcnic_show_bridged_mode,
+ .store = qlcnic_store_bridged_mode,
+};
+
+static ssize_t
+qlcnic_store_diag_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long new;
+
+ if (strict_strtoul(buf, 2, &new))
+ return -EINVAL;
+
+ if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ adapter->flags ^= QLCNIC_DIAG_ENABLED;
+
+ return len;
+}
+
+static ssize_t
+qlcnic_show_diag_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n",
+ !!(adapter->flags & QLCNIC_DIAG_ENABLED));
+}
+
+static struct device_attribute dev_attr_diag_mode = {
+ .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
+ .show = qlcnic_show_diag_mode,
+ .store = qlcnic_store_diag_mode,
+};
+
+static int
+qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ return -EIO;
+
+ if ((size != 4) || (offset & 0x3))
+ return -EINVAL;
+
+ if (offset < QLCNIC_PCI_CRBSPACE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t
+qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ data = QLCRD32(adapter, offset);
+ memcpy(buf, &data, size);
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ memcpy(&data, buf, size);
+ QLCWR32(adapter, offset, data);
+ return size;
+}
+
+static int
+qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ return -EIO;
+
+ if ((size != 8) || (offset & 0x7))
+ return -EIO;
+
+ return 0;
+}
+
+static ssize_t
+qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
+ return -EIO;
+
+ memcpy(buf, &data, size);
+
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_write_mem(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ memcpy(&data, buf, size);
+
+ if (qlcnic_pci_mem_write_2M(adapter, offset, data))
+ return -EIO;
+
+ return size;
+}
+
+
+static struct bin_attribute bin_attr_crb = {
+ .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_crb,
+ .write = qlcnic_sysfs_write_crb,
+};
+
+static struct bin_attribute bin_attr_mem = {
+ .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_mem,
+ .write = qlcnic_sysfs_write_mem,
+};
+
+static void
+qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ if (device_create_file(dev, &dev_attr_bridged_mode))
+ dev_warn(dev,
+ "failed to create bridged_mode sysfs entry\n");
+}
+
+static void
+qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ device_remove_file(dev, &dev_attr_bridged_mode);
+}
+
+static void
+qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (device_create_file(dev, &dev_attr_diag_mode))
+ dev_info(dev, "failed to create diag_mode sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_crb))
+ dev_info(dev, "failed to create crb sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_mem))
+ dev_info(dev, "failed to create mem sysfs entry\n");
+}
+
+
+static void
+qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ device_remove_file(dev, &dev_attr_diag_mode);
+ device_remove_bin_file(dev, &bin_attr_crb);
+ device_remove_bin_file(dev, &bin_attr_mem);
+}
+
+#ifdef CONFIG_INET
+
+#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
+
+static int
+qlcnic_destip_supported(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw.cut_through)
+ return 0;
+
+ return 1;
+}
+
+static void
+qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
+{
+ struct in_device *indev;
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (!qlcnic_destip_supported(adapter))
+ return;
+
+ indev = in_dev_get(dev);
+ if (!indev)
+ return;
+
+ for_ifa(indev) {
+ switch (event) {
+ case NETDEV_UP:
+ qlcnic_config_ipaddr(adapter,
+ ifa->ifa_address, QLCNIC_IP_UP);
+ break;
+ case NETDEV_DOWN:
+ qlcnic_config_ipaddr(adapter,
+ ifa->ifa_address, QLCNIC_IP_DOWN);
+ break;
+ default:
+ break;
+ }
+ } endfor_ifa(indev);
+
+ in_dev_put(indev);
+ return;
+}
+
+static int qlcnic_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *dev = (struct net_device *)ptr;
+
+recheck:
+ if (dev == NULL)
+ goto done;
+
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ dev = vlan_dev_real_dev(dev);
+ goto recheck;
+ }
+
+ if (!is_qlcnic_netdev(dev))
+ goto done;
+
+ adapter = netdev_priv(dev);
+
+ if (!adapter)
+ goto done;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ goto done;
+
+ qlcnic_config_indev_addr(dev, event);
+done:
+ return NOTIFY_DONE;
+}
+
+static int
+qlcnic_inetaddr_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *dev;
+
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+
+ dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+
+recheck:
+ if (dev == NULL || !netif_running(dev))
+ goto done;
+
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ dev = vlan_dev_real_dev(dev);
+ goto recheck;
+ }
+
+ if (!is_qlcnic_netdev(dev))
+ goto done;
+
+ adapter = netdev_priv(dev);
+
+ if (!adapter || !qlcnic_destip_supported(adapter))
+ goto done;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ goto done;
+
+ switch (event) {
+ case NETDEV_UP:
+ qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
+ break;
+ case NETDEV_DOWN:
+ qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
+ break;
+ default:
+ break;
+ }
+
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block qlcnic_netdev_cb = {
+ .notifier_call = qlcnic_netdev_event,
+};
+
+static struct notifier_block qlcnic_inetaddr_cb = {
+ .notifier_call = qlcnic_inetaddr_event,
+};
+#else
+static void
+qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
+{ }
+#endif
+
+static struct pci_driver qlcnic_driver = {
+ .name = qlcnic_driver_name,
+ .id_table = qlcnic_pci_tbl,
+ .probe = qlcnic_probe,
+ .remove = __devexit_p(qlcnic_remove),
+#ifdef CONFIG_PM
+ .suspend = qlcnic_suspend,
+ .resume = qlcnic_resume,
+#endif
+ .shutdown = qlcnic_shutdown
+};
+
+static int __init qlcnic_init_module(void)
+{
+
+ printk(KERN_INFO "%s\n", qlcnic_driver_string);
+
+#ifdef CONFIG_INET
+ register_netdevice_notifier(&qlcnic_netdev_cb);
+ register_inetaddr_notifier(&qlcnic_inetaddr_cb);
+#endif
+
+
+ return pci_register_driver(&qlcnic_driver);
+}
+
+module_init(qlcnic_init_module);
+
+static void __exit qlcnic_exit_module(void)
+{
+
+ pci_unregister_driver(&qlcnic_driver);
+
+#ifdef CONFIG_INET
+ unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
+ unregister_netdevice_notifier(&qlcnic_netdev_cb);
+#endif
+}
+
+module_exit(qlcnic_exit_module);
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 862c1aaf3860..ebfd17776b53 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -54,12 +54,8 @@
#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
-#define SMALL_BUFFER_SIZE 512
-#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
#define LARGE_BUFFER_MAX_SIZE 8192
#define LARGE_BUFFER_MIN_SIZE 2048
-#define MAX_SPLIT_SIZE 1023
-#define QLGE_SB_PAD 32
#define MAX_CQ 128
#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
@@ -79,15 +75,43 @@
#define TX_DESC_PER_OAL 0
#endif
+/* Word shifting for converting 64-bit
+ * address to a series of 16-bit words.
+ * This is used for some MPI firmware
+ * mailbox commands.
+ */
+#define LSW(x) ((u16)(x))
+#define MSW(x) ((u16)((u32)(x) >> 16))
+#define LSD(x) ((u32)((u64)(x)))
+#define MSD(x) ((u32)((((u64)(x)) >> 32)))
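+/* e.g. for x = 0x1122334455667788: MSD(x) = 0x11223344, LSD(x) = 0x55667788, */
+/* MSW(LSD(x)) = 0x5566 and LSW(x) = 0x7788. */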
+
/* MPI test register definitions. This register
* is used for determining alternate NIC function's
* PCI->func number.
*/
enum {
MPI_TEST_FUNC_PORT_CFG = 0x1002,
+ MPI_TEST_FUNC_PRB_CTL = 0x100e,
+ MPI_TEST_FUNC_PRB_EN = 0x18a20000,
+ MPI_TEST_FUNC_RST_STS = 0x100a,
+ MPI_TEST_FUNC_RST_FRC = 0x00000003,
+ MPI_TEST_NIC_FUNC_MASK = 0x00000007,
+ MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0),
+ MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e,
MPI_TEST_NIC1_FUNC_SHIFT = 1,
+ MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4),
+ MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0,
MPI_TEST_NIC2_FUNC_SHIFT = 5,
- MPI_TEST_NIC_FUNC_MASK = 0x00000007,
+ MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8),
+ MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00,
+ MPI_TEST_FC1_FUNCTION_SHIFT = 9,
+ MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12),
+ MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000,
+ MPI_TEST_FC2_FUNCTION_SHIFT = 13,
+
+ MPI_NIC_READ = 0x00000000,
+ MPI_NIC_REG_BLOCK = 0x00020000,
+ MPI_NIC_FUNCTION_SHIFT = 6,
};
/*
@@ -468,7 +492,7 @@ enum {
MDIO_PORT = 0x00000440,
MDIO_STATUS = 0x00000450,
- /* XGMAC AUX statistics registers */
+ XGMAC_REGISTER_END = 0x00000740,
};
/*
@@ -509,6 +533,7 @@ enum {
enum {
MAC_ADDR_IDX_SHIFT = 4,
MAC_ADDR_TYPE_SHIFT = 16,
+ MAC_ADDR_TYPE_COUNT = 10,
MAC_ADDR_TYPE_MASK = 0x000f0000,
MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
@@ -526,6 +551,30 @@ enum {
MAC_ADDR_MR = (1 << 30),
MAC_ADDR_MW = (1 << 31),
MAX_MULTICAST_ENTRIES = 32,
+
+ /* Entry count and words per entry
+ * for each address type in the filter.
+ */
+ MAC_ADDR_MAX_CAM_ENTRIES = 512,
+ MAC_ADDR_MAX_CAM_WCOUNT = 3,
+ MAC_ADDR_MAX_MULTICAST_ENTRIES = 32,
+ MAC_ADDR_MAX_MULTICAST_WCOUNT = 2,
+ MAC_ADDR_MAX_VLAN_ENTRIES = 4096,
+ MAC_ADDR_MAX_VLAN_WCOUNT = 1,
+ MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096,
+ MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1,
+ MAC_ADDR_MAX_FC_MAC_ENTRIES = 4,
+ MAC_ADDR_MAX_FC_MAC_WCOUNT = 2,
+ MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8,
+ MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2,
+ MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16,
+ MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1,
+ MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4,
+ MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1,
+ MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4,
+ MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4,
+ MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4,
+ MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1,
};
/*
@@ -596,6 +645,7 @@ enum {
enum {
RT_IDX_IDX_SHIFT = 8,
RT_IDX_TYPE_MASK = 0x000f0000,
+ RT_IDX_TYPE_SHIFT = 16,
RT_IDX_TYPE_RT = 0x00000000,
RT_IDX_TYPE_RT_INV = 0x00010000,
RT_IDX_TYPE_NICQ = 0x00020000,
@@ -664,7 +714,89 @@ enum {
RT_IDX_UNUSED013 = 13,
RT_IDX_UNUSED014 = 14,
RT_IDX_PROMISCUOUS_SLOT = 15,
- RT_IDX_MAX_SLOTS = 16,
+ RT_IDX_MAX_RT_SLOTS = 8,
+ RT_IDX_MAX_NIC_SLOTS = 16,
+};
+
+/*
+ * Serdes Address Register (XG_SERDES_ADDR) bit definitions.
+ */
+enum {
+ XG_SERDES_ADDR_RDY = (1 << 31),
+ XG_SERDES_ADDR_R = (1 << 30),
+
+ XG_SERDES_ADDR_STS = 0x00001E06,
+ XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005,
+ XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a,
+ XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001,
+
+ /* Serdes coredump definitions. */
+ XG_SERDES_XAUI_AN_START = 0x00000000,
+ XG_SERDES_XAUI_AN_END = 0x00000034,
+ XG_SERDES_XAUI_HSS_PCS_START = 0x00000800,
+ XG_SERDES_XAUI_HSS_PCS_END = 0x0000880,
+ XG_SERDES_XFI_AN_START = 0x00001000,
+ XG_SERDES_XFI_AN_END = 0x00001034,
+ XG_SERDES_XFI_TRAIN_START = 0x10001050,
+ XG_SERDES_XFI_TRAIN_END = 0x1000107C,
+ XG_SERDES_XFI_HSS_PCS_START = 0x00001800,
+ XG_SERDES_XFI_HSS_PCS_END = 0x00001838,
+ XG_SERDES_XFI_HSS_TX_START = 0x00001c00,
+ XG_SERDES_XFI_HSS_TX_END = 0x00001c1f,
+ XG_SERDES_XFI_HSS_RX_START = 0x00001c40,
+ XG_SERDES_XFI_HSS_RX_END = 0x00001c5f,
+ XG_SERDES_XFI_HSS_PLL_START = 0x00001e00,
+ XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f,
+};
+
+/*
+ * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions.
+ */
+enum {
+ PRB_MX_ADDR_ARE = (1 << 16),
+ PRB_MX_ADDR_UP = (1 << 15),
+ PRB_MX_ADDR_SWP = (1 << 14),
+
+ /* Module select values. */
+ PRB_MX_ADDR_MAX_MODS = 21,
+ PRB_MX_ADDR_MOD_SEL_SHIFT = 9,
+ PRB_MX_ADDR_MOD_SEL_TBD = 0,
+ PRB_MX_ADDR_MOD_SEL_IDE1 = 1,
+ PRB_MX_ADDR_MOD_SEL_IDE2 = 2,
+ PRB_MX_ADDR_MOD_SEL_FRB = 3,
+ PRB_MX_ADDR_MOD_SEL_ODE1 = 4,
+ PRB_MX_ADDR_MOD_SEL_ODE2 = 5,
+ PRB_MX_ADDR_MOD_SEL_DA1 = 6,
+ PRB_MX_ADDR_MOD_SEL_DA2 = 7,
+ PRB_MX_ADDR_MOD_SEL_IMP1 = 8,
+ PRB_MX_ADDR_MOD_SEL_IMP2 = 9,
+ PRB_MX_ADDR_MOD_SEL_OMP1 = 10,
+ PRB_MX_ADDR_MOD_SEL_OMP2 = 11,
+ PRB_MX_ADDR_MOD_SEL_ORS1 = 12,
+ PRB_MX_ADDR_MOD_SEL_ORS2 = 13,
+ PRB_MX_ADDR_MOD_SEL_REG = 14,
+ PRB_MX_ADDR_MOD_SEL_MAC1 = 16,
+ PRB_MX_ADDR_MOD_SEL_MAC2 = 17,
+ PRB_MX_ADDR_MOD_SEL_VQM1 = 18,
+ PRB_MX_ADDR_MOD_SEL_VQM2 = 19,
+ PRB_MX_ADDR_MOD_SEL_MOP = 20,
+ /* Bit fields indicating which modules
+ * are valid for each clock domain.
+ */
+ PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7,
+ PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1,
+ PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309,
+ PRB_MX_ADDR_VALID_FC_MOD = 0x00003001,
+ PRB_MX_ADDR_VALID_TOTAL = 34,
+
+ /* Clock domain values. */
+ PRB_MX_ADDR_CLOCK_SHIFT = 6,
+ PRB_MX_ADDR_SYS_CLOCK = 0,
+ PRB_MX_ADDR_PCI_CLOCK = 2,
+ PRB_MX_ADDR_FC_CLOCK = 5,
+ PRB_MX_ADDR_XGM_CLOCK = 6,
+
+ PRB_MX_ADDR_MAX_MUX = 64,
};
/*
@@ -737,6 +869,21 @@ enum {
PRB_MX_DATA = 0xfc, /* Use semaphore */
};
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#define SMALL_BUFFER_SIZE 256
+#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE
+#define SPLT_SETTING FSC_DBRST_1024
+#define SPLT_LEN 0
+#define QLGE_SB_PAD 0
+#else
+#define SMALL_BUFFER_SIZE 512
+#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
+#define SPLT_SETTING FSC_SH
+#define SPLT_LEN (SPLT_HDR_EP | \
+ min(SMALL_BUF_MAP_SIZE, 1023))
+#define QLGE_SB_PAD 32
+#endif
+
/*
* CAM output format.
*/
@@ -1421,7 +1568,7 @@ struct nic_stats {
u64 rx_nic_fifo_drop;
};
-/* Address/Length pairs for the coredump. */
+/* Firmware coredump internal register address/length pairs. */
enum {
MPI_CORE_REGS_ADDR = 0x00030000,
MPI_CORE_REGS_CNT = 127,
@@ -1476,7 +1623,7 @@ struct mpi_coredump_segment_header {
u8 description[16];
};
-/* Reg dump segment numbers. */
+/* Firmware coredump header segment numbers. */
enum {
CORE_SEG_NUM = 1,
TEST_LOGIC_SEG_NUM = 2,
@@ -1527,6 +1674,67 @@ enum {
};
+/* There are 64 generic NIC registers. */
+#define NIC_REGS_DUMP_WORD_COUNT 64
+/* XGMAC word count. */
+#define XGMAC_DUMP_WORD_COUNT (XGMAC_REGISTER_END / 4)
+/* Word counts for the SERDES blocks. */
+#define XG_SERDES_XAUI_AN_COUNT 14
+#define XG_SERDES_XAUI_HSS_PCS_COUNT 33
+#define XG_SERDES_XFI_AN_COUNT 14
+#define XG_SERDES_XFI_TRAIN_COUNT 12
+#define XG_SERDES_XFI_HSS_PCS_COUNT 15
+#define XG_SERDES_XFI_HSS_TX_COUNT 32
+#define XG_SERDES_XFI_HSS_RX_COUNT 32
+#define XG_SERDES_XFI_HSS_PLL_COUNT 32
+
+/* There are 2 CNA ETS and 8 NIC ETS registers. */
+#define ETS_REGS_DUMP_WORD_COUNT 10
+
+/* Each probe mux entry stores the probe type plus 64 entries
+ * that are each 64 bits in length. There are a total of
+ * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes.
+ */
+#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2))
+#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \
+ PRB_MX_ADDR_VALID_TOTAL)
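+/* i.e. (1 + 64 * 2) = 129 words per probe and 129 * 34 = 4386 words in total. */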
+/* Each routing entry consists of 4 32-bit words.
+ * They are route type, index, index word, and result.
+ * There are 2 route blocks with 8 entries each and
+ * 2 NIC blocks with 16 entries each.
+ * The total is 48 entries of 4 words each.
+ */
+#define RT_IDX_DUMP_ENTRIES 48
+#define RT_IDX_DUMP_WORDS_PER_ENTRY 4
+#define RT_IDX_DUMP_TOT_WORDS (RT_IDX_DUMP_ENTRIES * \
+ RT_IDX_DUMP_WORDS_PER_ENTRY)
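+/* i.e. 48 entries * 4 words = 192 words in total. */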
+/* There are 10 address blocks in filter, each with
+ * different entry counts and different word-count-per-entry.
+ */
+#define MAC_ADDR_DUMP_ENTRIES \
+ ((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \
+ (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \
+ (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \
+ (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \
+ (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT))
+#define MAC_ADDR_DUMP_WORDS_PER_ENTRY 2
+#define MAC_ADDR_DUMP_TOT_WORDS (MAC_ADDR_DUMP_ENTRIES * \
+ MAC_ADDR_DUMP_WORDS_PER_ENTRY)
+/* Maximum of 4 functions whose semaphore registers are
+ * in the coredump.
+ */
+#define MAX_SEMAPHORE_FUNCTIONS 4
+/* Defines for accessing the MPI shadow registers. */
+#define RISC_124 0x0003007c
+#define RISC_127 0x0003007f
+#define SHADOW_OFFSET 0xb0000000
+#define SHADOW_REG_SHIFT 20
+
struct ql_nic_misc {
u32 rx_ring_count;
u32 tx_ring_count;
@@ -1568,6 +1776,199 @@ struct ql_reg_dump {
u32 ets[8+2];
};
+struct ql_mpi_coredump {
+ /* segment 0 */
+ struct mpi_coredump_global_header mpi_global_header;
+
+ /* segment 1 */
+ struct mpi_coredump_segment_header core_regs_seg_hdr;
+ u32 mpi_core_regs[MPI_CORE_REGS_CNT];
+ u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT];
+
+ /* segment 2 */
+ struct mpi_coredump_segment_header test_logic_regs_seg_hdr;
+ u32 test_logic_regs[TEST_REGS_CNT];
+
+ /* segment 3 */
+ struct mpi_coredump_segment_header rmii_regs_seg_hdr;
+ u32 rmii_regs[RMII_REGS_CNT];
+
+ /* segment 4 */
+ struct mpi_coredump_segment_header fcmac1_regs_seg_hdr;
+ u32 fcmac1_regs[FCMAC_REGS_CNT];
+
+ /* segment 5 */
+ struct mpi_coredump_segment_header fcmac2_regs_seg_hdr;
+ u32 fcmac2_regs[FCMAC_REGS_CNT];
+
+ /* segment 6 */
+ struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr;
+ u32 fc1_mbx_regs[FC_MBX_REGS_CNT];
+
+ /* segment 7 */
+ struct mpi_coredump_segment_header ide_regs_seg_hdr;
+ u32 ide_regs[IDE_REGS_CNT];
+
+ /* segment 8 */
+ struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr;
+ u32 nic1_mbx_regs[NIC_MBX_REGS_CNT];
+
+ /* segment 9 */
+ struct mpi_coredump_segment_header smbus_regs_seg_hdr;
+ u32 smbus_regs[SMBUS_REGS_CNT];
+
+ /* segment 10 */
+ struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr;
+ u32 fc2_mbx_regs[FC_MBX_REGS_CNT];
+
+ /* segment 11 */
+ struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr;
+ u32 nic2_mbx_regs[NIC_MBX_REGS_CNT];
+
+ /* segment 12 */
+ struct mpi_coredump_segment_header i2c_regs_seg_hdr;
+ u32 i2c_regs[I2C_REGS_CNT];
+ /* segment 13 */
+ struct mpi_coredump_segment_header memc_regs_seg_hdr;
+ u32 memc_regs[MEMC_REGS_CNT];
+
+ /* segment 14 */
+ struct mpi_coredump_segment_header pbus_regs_seg_hdr;
+ u32 pbus_regs[PBUS_REGS_CNT];
+
+ /* segment 15 */
+ struct mpi_coredump_segment_header mde_regs_seg_hdr;
+ u32 mde_regs[MDE_REGS_CNT];
+
+ /* segment 16 */
+ struct mpi_coredump_segment_header nic_regs_seg_hdr;
+ u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT];
+
+ /* segment 17 */
+ struct mpi_coredump_segment_header nic2_regs_seg_hdr;
+ u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT];
+
+ /* segment 18 */
+ struct mpi_coredump_segment_header xgmac1_seg_hdr;
+ u32 xgmac1[XGMAC_DUMP_WORD_COUNT];
+
+ /* segment 19 */
+ struct mpi_coredump_segment_header xgmac2_seg_hdr;
+ u32 xgmac2[XGMAC_DUMP_WORD_COUNT];
+
+ /* segment 20 */
+ struct mpi_coredump_segment_header code_ram_seg_hdr;
+ u32 code_ram[CODE_RAM_CNT];
+
+ /* segment 21 */
+ struct mpi_coredump_segment_header memc_ram_seg_hdr;
+ u32 memc_ram[MEMC_RAM_CNT];
+
+ /* segment 22 */
+ struct mpi_coredump_segment_header xaui_an_hdr;
+ u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT];
+
+ /* segment 23 */
+ struct mpi_coredump_segment_header xaui_hss_pcs_hdr;
+ u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
+
+ /* segment 24 */
+ struct mpi_coredump_segment_header xfi_an_hdr;
+ u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT];
+
+ /* segment 25 */
+ struct mpi_coredump_segment_header xfi_train_hdr;
+ u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
+
+ /* segment 26 */
+ struct mpi_coredump_segment_header xfi_hss_pcs_hdr;
+ u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
+
+ /* segment 27 */
+ struct mpi_coredump_segment_header xfi_hss_tx_hdr;
+ u32 serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
+
+ /* segment 28 */
+ struct mpi_coredump_segment_header xfi_hss_rx_hdr;
+ u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
+
+ /* segment 29 */
+ struct mpi_coredump_segment_header xfi_hss_pll_hdr;
+ u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
+
+ /* segment 30 */
+ struct mpi_coredump_segment_header misc_nic_seg_hdr;
+ struct ql_nic_misc misc_nic_info;
+
+ /* segment 31 */
+ /* one interrupt state for each CQ */
+ struct mpi_coredump_segment_header intr_states_seg_hdr;
+ u32 intr_states[MAX_RX_RINGS];
+
+ /* segment 32 */
+ /* 3 cam words each for 16 unicast,
+ * 2 cam words for each of 32 multicast.
+ */
+ struct mpi_coredump_segment_header cam_entries_seg_hdr;
+ u32 cam_entries[(16 * 3) + (32 * 3)];
+
+ /* segment 33 */
+ struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
+ u32 nic_routing_words[16];
+ /* segment 34 */
+ struct mpi_coredump_segment_header ets_seg_hdr;
+ u32 ets[ETS_REGS_DUMP_WORD_COUNT];
+
+ /* segment 35 */
+ struct mpi_coredump_segment_header probe_dump_seg_hdr;
+ u32 probe_dump[PRB_MX_DUMP_TOT_COUNT];
+
+ /* segment 36 */
+ struct mpi_coredump_segment_header routing_reg_seg_hdr;
+ u32 routing_regs[RT_IDX_DUMP_TOT_WORDS];
+
+ /* segment 37 */
+ struct mpi_coredump_segment_header mac_prot_reg_seg_hdr;
+ u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS];
+
+ /* segment 38 */
+ struct mpi_coredump_segment_header xaui2_an_hdr;
+ u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT];
+
+ /* segment 39 */
+ struct mpi_coredump_segment_header xaui2_hss_pcs_hdr;
+ u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
+
+ /* segment 40 */
+ struct mpi_coredump_segment_header xfi2_an_hdr;
+ u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT];
+
+ /* segment 41 */
+ struct mpi_coredump_segment_header xfi2_train_hdr;
+ u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
+
+ /* segment 42 */
+ struct mpi_coredump_segment_header xfi2_hss_pcs_hdr;
+ u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
+
+ /* segment 43 */
+ struct mpi_coredump_segment_header xfi2_hss_tx_hdr;
+ u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
+
+ /* segment 44 */
+ struct mpi_coredump_segment_header xfi2_hss_rx_hdr;
+ u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
+
+ /* segment 45 */
+ struct mpi_coredump_segment_header xfi2_hss_pll_hdr;
+ u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
+
+ /* segment 50 */
+	/* semaphore registers, one per function (MAX_SEMAPHORE_FUNCTIONS) */
+ struct mpi_coredump_segment_header sem_regs_seg_hdr;
+ u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS];
+};
+
/*
* intr_context structure is used during initialization
* to hook the interrupts. It is also used in a single
@@ -1603,6 +2004,8 @@ enum {
QL_CAM_RT_SET = 8,
QL_SELFTEST = 9,
QL_LB_LINK_UP = 10,
+ QL_FRC_COREDUMP = 11,
+ QL_EEH_FATAL = 12,
};
/* link_status bit definitions */
@@ -1724,6 +2127,8 @@ struct ql_adapter {
u32 port_link_up;
u32 port_init;
u32 link_status;
+ struct ql_mpi_coredump *mpi_coredump;
+ u32 core_is_dumped;
u32 link_config;
u32 led_config;
u32 max_frame_size;
@@ -1736,9 +2141,11 @@ struct ql_adapter {
struct delayed_work mpi_work;
struct delayed_work mpi_port_cfg_work;
struct delayed_work mpi_idc_work;
+ struct delayed_work mpi_core_to_log;
struct completion ide_completion;
struct nic_operations *nic_ops;
u16 device_id;
+ struct timer_list timer;
atomic_t lb_count;
};
@@ -1807,6 +2214,7 @@ extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
void ql_queue_fw_error(struct ql_adapter *qdev);
void ql_mpi_work(struct work_struct *work);
void ql_mpi_reset_work(struct work_struct *work);
+void ql_mpi_core_to_log(struct work_struct *work);
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
void ql_queue_asic_error(struct ql_adapter *qdev);
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
@@ -1817,6 +2225,15 @@ void ql_mpi_port_cfg_work(struct work_struct *work);
int ql_mb_get_fw_state(struct ql_adapter *qdev);
int ql_cam_route_initialize(struct ql_adapter *qdev);
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
+int ql_unpause_mpi_risc(struct ql_adapter *qdev);
+int ql_pause_mpi_risc(struct ql_adapter *qdev);
+int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
+ u32 ram_addr, int word_count);
+int ql_core_dump(struct ql_adapter *qdev,
+ struct ql_mpi_coredump *mpi_coredump);
+int ql_mb_sys_err(struct ql_adapter *qdev);
int ql_mb_about_fw(struct ql_adapter *qdev);
int ql_wol(struct ql_adapter *qdev);
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
@@ -1833,6 +2250,7 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
struct ql_reg_dump *mpi_coredump);
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
+int ql_own_firmware(struct ql_adapter *qdev);
int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
#if 1
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 9f58c4710761..57df835147eb 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1,5 +1,405 @@
#include "qlge.h"
+/* Read a NIC register from the alternate function. */
+static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
+ u32 reg)
+{
+ u32 register_to_read;
+ u32 reg_val;
+ unsigned int status = 0;
+
+ register_to_read = MPI_NIC_REG_BLOCK
+ | MPI_NIC_READ
+ | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
+ | reg;
+ status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
+ if (status != 0)
+ return 0xffffffff;
+
+ return reg_val;
+}
+
+/* Write a NIC register on the alternate function. */
+static int ql_write_other_func_reg(struct ql_adapter *qdev,
+ u32 reg, u32 reg_val)
+{
+ u32 register_to_read;
+ int status = 0;
+
+ register_to_read = MPI_NIC_REG_BLOCK
+ | MPI_NIC_READ
+ | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
+ | reg;
+ status = ql_write_mpi_reg(qdev, register_to_read, reg_val);
+
+ return status;
+}
+
+static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
+ u32 bit, u32 err_bit)
+{
+ u32 temp;
+ int count = 10;
+
+ while (count) {
+ temp = ql_read_other_func_reg(qdev, reg);
+
+ /* check for errors */
+ if (temp & err_bit)
+ return -1;
+ else if (temp & bit)
+ return 0;
+ mdelay(10);
+ count--;
+ }
+ return -1;
+}
+
+static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
+ u32 *data)
+{
+ int status;
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
+ XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* set up for reg read */
+ ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
+ XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* get the data */
+ *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
+exit:
+ return status;
+}
+
+/* Read out the SERDES registers */
+static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data)
+{
+ int status;
+
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* set up for reg read */
+ ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
+
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* get the data */
+ *data = ql_read32(qdev, XG_SERDES_DATA);
+exit:
+ return status;
+}
+
+static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
+ u32 *direct_ptr, u32 *indirect_ptr,
+ unsigned int direct_valid, unsigned int indirect_valid)
+{
+ unsigned int status;
+
+ status = 1;
+ if (direct_valid)
+ status = ql_read_serdes_reg(qdev, addr, direct_ptr);
+ /* Dead fill any failures or invalids. */
+ if (status)
+ *direct_ptr = 0xDEADBEEF;
+
+ status = 1;
+ if (indirect_valid)
+ status = ql_read_other_func_serdes_reg(
+ qdev, addr, indirect_ptr);
+ /* Dead fill any failures or invalids. */
+ if (status)
+ *indirect_ptr = 0xDEADBEEF;
+}
+
+static int ql_get_serdes_regs(struct ql_adapter *qdev,
+ struct ql_mpi_coredump *mpi_coredump)
+{
+ int status;
+ unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
+ unsigned int xaui_indirect_valid, i;
+ u32 *direct_ptr, temp;
+ u32 *indirect_ptr;
+
+ xfi_direct_valid = xfi_indirect_valid = 0;
+ xaui_direct_valid = xaui_indirect_valid = 1;
+
+ /* The XAUI needs to be read out per port */
+ if (qdev->func & 1) {
+ /* We are NIC 2 */
+ status = ql_read_other_func_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ if (status)
+ temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+ if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+ XG_SERDES_ADDR_XAUI_PWR_DOWN)
+ xaui_indirect_valid = 0;
+
+ status = ql_read_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ if (status)
+ temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+
+ if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+ XG_SERDES_ADDR_XAUI_PWR_DOWN)
+ xaui_direct_valid = 0;
+ } else {
+ /* We are NIC 1 */
+ status = ql_read_other_func_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ if (status)
+ temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+ if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+ XG_SERDES_ADDR_XAUI_PWR_DOWN)
+ xaui_indirect_valid = 0;
+
+ status = ql_read_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ if (status)
+ temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+ if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+ XG_SERDES_ADDR_XAUI_PWR_DOWN)
+ xaui_direct_valid = 0;
+ }
+
+ /*
+	 * XFI register is shared so we only need to read one
+	 * function and then check the bits.
+ */
+ status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
+ if (status)
+ temp = 0;
+
+ if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
+ XG_SERDES_ADDR_XFI1_PWR_UP) {
+ /* now see if i'm NIC 1 or NIC 2 */
+ if (qdev->func & 1)
+ /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
+ xfi_indirect_valid = 1;
+ else
+ xfi_direct_valid = 1;
+ }
+ if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
+ XG_SERDES_ADDR_XFI2_PWR_UP) {
+ /* now see if i'm NIC 1 or NIC 2 */
+ if (qdev->func & 1)
+ /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
+ xfi_direct_valid = 1;
+ else
+ xfi_indirect_valid = 1;
+ }
+
+ /* Get XAUI_AN register block. */
+ if (qdev->func & 1) {
+ /* Function 2 is direct */
+ direct_ptr = mpi_coredump->serdes2_xaui_an;
+ indirect_ptr = mpi_coredump->serdes_xaui_an;
+ } else {
+ /* Function 1 is direct */
+ direct_ptr = mpi_coredump->serdes_xaui_an;
+ indirect_ptr = mpi_coredump->serdes2_xaui_an;
+ }
+
+ for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xaui_direct_valid, xaui_indirect_valid);
+
+ /* Get XAUI_HSS_PCS register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xaui_hss_pcs;
+ indirect_ptr =
+ mpi_coredump->serdes_xaui_hss_pcs;
+ } else {
+ direct_ptr =
+ mpi_coredump->serdes_xaui_hss_pcs;
+ indirect_ptr =
+ mpi_coredump->serdes2_xaui_hss_pcs;
+ }
+
+ for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xaui_direct_valid, xaui_indirect_valid);
+
+ /* Get XAUI_XFI_AN register block. */
+ if (qdev->func & 1) {
+ direct_ptr = mpi_coredump->serdes2_xfi_an;
+ indirect_ptr = mpi_coredump->serdes_xfi_an;
+ } else {
+ direct_ptr = mpi_coredump->serdes_xfi_an;
+ indirect_ptr = mpi_coredump->serdes2_xfi_an;
+ }
+
+ for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_TRAIN register block. */
+ if (qdev->func & 1) {
+ direct_ptr = mpi_coredump->serdes2_xfi_train;
+ indirect_ptr =
+ mpi_coredump->serdes_xfi_train;
+ } else {
+ direct_ptr = mpi_coredump->serdes_xfi_train;
+ indirect_ptr =
+ mpi_coredump->serdes2_xfi_train;
+ }
+
+ for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_HSS_PCS register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xfi_hss_pcs;
+ indirect_ptr =
+ mpi_coredump->serdes_xfi_hss_pcs;
+ } else {
+ direct_ptr =
+ mpi_coredump->serdes_xfi_hss_pcs;
+ indirect_ptr =
+ mpi_coredump->serdes2_xfi_hss_pcs;
+ }
+
+ for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_HSS_TX register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xfi_hss_tx;
+ indirect_ptr =
+ mpi_coredump->serdes_xfi_hss_tx;
+ } else {
+ direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
+ indirect_ptr =
+ mpi_coredump->serdes2_xfi_hss_tx;
+ }
+ for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_HSS_RX register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xfi_hss_rx;
+ indirect_ptr =
+ mpi_coredump->serdes_xfi_hss_rx;
+ } else {
+ direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
+ indirect_ptr =
+ mpi_coredump->serdes2_xfi_hss_rx;
+ }
+
+ for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+
+ /* Get XAUI_XFI_HSS_PLL register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xfi_hss_pll;
+ indirect_ptr =
+ mpi_coredump->serdes_xfi_hss_pll;
+ } else {
+ direct_ptr =
+ mpi_coredump->serdes_xfi_hss_pll;
+ indirect_ptr =
+ mpi_coredump->serdes2_xfi_hss_pll;
+ }
+ for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+ return 0;
+}
+
+static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
+ u32 *data)
+{
+ int status = 0;
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
+ XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ if (status)
+ goto exit;
+
+ /* set up for reg read */
+ ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
+ XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ if (status)
+ goto exit;
+
+ /* get the data */
+ *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
+exit:
+ return status;
+}
+
+/* Read the 400 xgmac control/statistics registers
+ * skipping unused locations.
+ */
+static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf,
+ unsigned int other_function)
+{
+ int status = 0;
+ int i;
+
+ for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
+		/* We're reading 400 xgmac registers, but we filter out
+		 * several locations that are non-responsive to reads.
+		 */
+		if ((i == 0x00000114) ||
+			(i == 0x00000118) ||
+			(i == 0x0000013c) ||
+			(i == 0x00000140) ||
+			(i > 0x00000150 && i < 0x000001fc) ||
+			(i > 0x00000278 && i < 0x000002a0) ||
+			(i > 0x000002c0 && i < 0x000002cf) ||
+			(i > 0x000002dc && i < 0x000002f0) ||
+			(i > 0x000003c8 && i < 0x00000400) ||
+			(i > 0x00000400 && i < 0x00000410) ||
+			(i > 0x00000410 && i < 0x00000420) ||
+			(i > 0x00000420 && i < 0x00000430) ||
+			(i > 0x00000430 && i < 0x00000440) ||
+			(i > 0x00000440 && i < 0x00000450) ||
+			(i > 0x00000450 && i < 0x00000500) ||
+			(i > 0x0000054c && i < 0x00000568) ||
+			(i > 0x000005c8 && i < 0x00000600))
+			continue;
+
+		if (other_function)
+			status =
+				ql_read_other_func_xgmac_reg(qdev, i, buf);
+		else
+			status = ql_read_xgmac_reg(qdev, i, buf);
+
+		if (status)
+			*buf = 0xdeadbeef;
+	}
+ return status;
+}
static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
{
@@ -91,6 +491,226 @@ err:
return status;
}
+/* Read the MPI Processor shadow registers */
+static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 * buf)
+{
+ u32 i;
+ int status;
+
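+	/* select shadow register i through RISC_124, then read its value back via RISC_127 */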
+ for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
+ status = ql_write_mpi_reg(qdev, RISC_124,
+ (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
+ if (status)
+ goto end;
+ status = ql_read_mpi_reg(qdev, RISC_127, buf);
+ if (status)
+ goto end;
+ }
+end:
+ return status;
+}
+
+/* Read the MPI Processor core registers */
+static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf,
+ u32 offset, u32 count)
+{
+ int i, status = 0;
+ for (i = 0; i < count; i++, buf++) {
+ status = ql_read_mpi_reg(qdev, offset + i, buf);
+ if (status)
+ return status;
+ }
+ return status;
+}
+
+/* Read the ASIC probe dump */
+static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
+ u32 valid, u32 *buf)
+{
+ u32 module, mux_sel, probe, lo_val, hi_val;
+
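+	/* each module contributes one probe-address word followed by 64 lo/hi data pairs */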
+ for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
+ if (!((valid >> module) & 1))
+ continue;
+ for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
+ probe = clock
+ | PRB_MX_ADDR_ARE
+ | mux_sel
+ | (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
+ ql_write32(qdev, PRB_MX_ADDR, probe);
+ lo_val = ql_read32(qdev, PRB_MX_DATA);
+ if (mux_sel == 0) {
+ *buf = probe;
+ buf++;
+ }
+ probe |= PRB_MX_ADDR_UP;
+ ql_write32(qdev, PRB_MX_ADDR, probe);
+ hi_val = ql_read32(qdev, PRB_MX_DATA);
+ *buf = lo_val;
+ buf++;
+ *buf = hi_val;
+ buf++;
+ }
+ }
+ return buf;
+}
+
+static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
+{
+ /* First we have to enable the probe mux */
+ ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
+ PRB_MX_ADDR_VALID_SYS_MOD, buf);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
+ PRB_MX_ADDR_VALID_PCI_MOD, buf);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
+ PRB_MX_ADDR_VALID_XGM_MOD, buf);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
+ PRB_MX_ADDR_VALID_FC_MOD, buf);
+ return 0;
+
+}
+
+/* Read out the routing index registers */
+static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
+{
+ int status;
+ u32 type, index, index_max;
+ u32 result_index;
+ u32 result_data;
+ u32 val;
+
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (status)
+ return status;
+
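+	/* Dump 2 route blocks of 8 entries and 2 NIC blocks of 16 entries;
+	 * each entry is stored as four words: type, index, index register
+	 * and result data.
+	 */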
+ for (type = 0; type < 4; type++) {
+ if (type < 2)
+ index_max = 8;
+ else
+ index_max = 16;
+ for (index = 0; index < index_max; index++) {
+ val = RT_IDX_RS
+ | (type << RT_IDX_TYPE_SHIFT)
+ | (index << RT_IDX_IDX_SHIFT);
+ ql_write32(qdev, RT_IDX, val);
+ result_index = 0;
+ while ((result_index & RT_IDX_MR) == 0)
+ result_index = ql_read32(qdev, RT_IDX);
+ result_data = ql_read32(qdev, RT_DATA);
+ *buf = type;
+ buf++;
+ *buf = index;
+ buf++;
+ *buf = result_index;
+ buf++;
+ *buf = result_data;
+ buf++;
+ }
+ }
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ return status;
+}
+
+/* Read out the MAC protocol registers */
+static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
+{
+ u32 result_index, result_data;
+ u32 type;
+ u32 index;
+ u32 offset;
+ u32 val;
+ u32 initial_val = MAC_ADDR_RS;
+ u32 max_index;
+ u32 max_offset;
+
+ for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
+ switch (type) {
+
+ case 0: /* CAM */
+ initial_val |= MAC_ADDR_ADR;
+ max_index = MAC_ADDR_MAX_CAM_ENTRIES;
+ max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+ break;
+ case 1: /* Multicast MAC Address */
+ max_index = MAC_ADDR_MAX_CAM_WCOUNT;
+ max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+ break;
+ case 2: /* VLAN filter mask */
+ case 3: /* MC filter mask */
+ max_index = MAC_ADDR_MAX_CAM_WCOUNT;
+ max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+ break;
+ case 4: /* FC MAC addresses */
+ max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
+ max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
+ break;
+ case 5: /* Mgmt MAC addresses */
+ max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
+ break;
+ case 6: /* Mgmt VLAN addresses */
+ max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
+ break;
+ case 7: /* Mgmt IPv4 address */
+ max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
+ break;
+ case 8: /* Mgmt IPv6 address */
+ max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
+ break;
+ case 9: /* Mgmt TCP/UDP Dest port */
+ max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
+ break;
+ default:
+ printk(KERN_ERR"Bad type!!! 0x%08x\n", type);
+ max_index = 0;
+ max_offset = 0;
+ break;
+ }
+ for (index = 0; index < max_index; index++) {
+ for (offset = 0; offset < max_offset; offset++) {
+ val = initial_val
+ | (type << MAC_ADDR_TYPE_SHIFT)
+ | (index << MAC_ADDR_IDX_SHIFT)
+ | (offset);
+ ql_write32(qdev, MAC_ADDR_IDX, val);
+ result_index = 0;
+ while ((result_index & MAC_ADDR_MR) == 0) {
+ result_index = ql_read32(qdev,
+ MAC_ADDR_IDX);
+ }
+ result_data = ql_read32(qdev, MAC_ADDR_DATA);
+ *buf = result_index;
+ buf++;
+ *buf = result_data;
+ buf++;
+ }
+ }
+ }
+}
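As a sizing aid (not part of the patch): each (index, offset) pair in the loop above stores the returned MAC_ADDR_IDX word and the MAC_ADDR_DATA word, so a given address type contributes max_index * max_offset * 2 words to the buffer:

/* Illustrative sketch only - per-type word count for the MAC protocol dump. */
static inline u32 ql_mac_prot_words_example(u32 max_index, u32 max_offset)
{
	return max_index * max_offset * 2;
}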
+
+static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
+{
+ u32 func_num, reg, reg_val;
+ int status;
+
+ for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
+ reg = MPI_NIC_REG_BLOCK
+ | (func_num << MPI_NIC_FUNCTION_SHIFT)
+ | (SEM / 4);
+ status = ql_read_mpi_reg(qdev, reg, &reg_val);
+ *buf = reg_val;
+ /* If the read failed, dead-fill the element. */
+ if (status)
+ *buf = 0xdeadbeef;
+ buf++;
+ }
+}
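One more sizing note (not part of the patch): the semaphore dump stores exactly one word per PCI function, with failed reads dead-filled so they stand out in the log:

/* Illustrative sketch only - buffer length for ql_get_sem_registers(). */
enum { QL_SEM_DUMP_WORDS_EXAMPLE = MAX_SEMAPHORE_FUNCTIONS };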
+
/* Create a coredump segment header */
static void ql_build_coredump_seg_header(
struct mpi_coredump_segment_header *seg_hdr,
@@ -103,6 +723,527 @@ static void ql_build_coredump_seg_header(
memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
}
+/*
+ * This function should be called when a coredump / probedump
+ * is to be extracted from the HBA. It is assumed there is a
+ * qdev structure that contains the base address of the register
+ * space for this function as well as a coredump structure that
+ * will contain the dump.
+ */
+int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
+{
+ int status;
+ int i;
+
+ if (!mpi_coredump) {
+ QPRINTK(qdev, DRV, ERR,
+ "No memory available.\n");
+ return -ENOMEM;
+ }
+
+ /* Try to get the spinlock, but don't worry if
+ * it isn't available. If the firmware died it
+ * might be holding the sem.
+ */
+ ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+
+ status = ql_pause_mpi_risc(qdev);
+ if (status) {
+ QPRINTK(qdev, DRV, ERR,
+ "Failed RISC pause. Status = 0x%.08x\n", status);
+ goto err;
+ }
+
+ /* Insert the global header */
+ memset(&(mpi_coredump->mpi_global_header), 0,
+ sizeof(struct mpi_coredump_global_header));
+ mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
+ mpi_coredump->mpi_global_header.headerSize =
+ sizeof(struct mpi_coredump_global_header);
+ mpi_coredump->mpi_global_header.imageSize =
+ sizeof(struct ql_mpi_coredump);
+ memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ sizeof(mpi_coredump->mpi_global_header.idString));
+
+ /* Get generic NIC reg dump */
+ ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
+ NIC1_CONTROL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
+ NIC2_CONTROL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
+
+ /* Get XGMac registers. (Segment 18, Rev C. step 21) */
+ ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
+ NIC1_XGMAC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
+ NIC2_XGMAC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
+
+ if (qdev->func & 1) {
+ /* Odd means our function is NIC 2 */
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic2_regs[i] =
+ ql_read32(qdev, i * sizeof(u32));
+
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic_regs[i] =
+ ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
+
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
+ } else {
+ /* Even means our function is NIC 1 */
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic_regs[i] =
+ ql_read32(qdev, i * sizeof(u32));
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic2_regs[i] =
+ ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
+
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
+ }
+
+ /* Rev C. Step 20a */
+ ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
+ XAUI_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xaui_an),
+ "XAUI AN Registers");
+
+ /* Rev C. Step 20b */
+ ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
+ XAUI_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xaui_hss_pcs),
+ "XAUI HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_an),
+ "XFI AN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
+ XFI_TRAIN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_train),
+ "XFI TRAIN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
+ XFI_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_pcs),
+ "XFI HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
+ XFI_HSS_TX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_tx),
+ "XFI HSS TX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
+ XFI_HSS_RX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_rx),
+ "XFI HSS RX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
+ XFI_HSS_PLL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_pll),
+ "XFI HSS PLL Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
+ XAUI2_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xaui_an),
+ "XAUI2 AN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
+ XAUI2_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
+ "XAUI2 HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
+ XFI2_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_an),
+ "XFI2 AN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
+ XFI2_TRAIN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_train),
+ "XFI2 TRAIN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
+ XFI2_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
+ "XFI2 HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
+ XFI2_HSS_TX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_tx),
+ "XFI2 HSS TX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
+ XFI2_HSS_RX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_rx),
+ "XFI2 HSS RX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
+ XFI2_HSS_PLL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_pll),
+ "XFI2 HSS PLL Registers");
+
+ status = ql_get_serdes_regs(qdev, mpi_coredump);
+ if (status) {
+ QPRINTK(qdev, DRV, ERR,
+ "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
+ status);
+ goto err;
+ }
+
+ ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
+ CORE_SEG_NUM,
+ sizeof(mpi_coredump->core_regs_seg_hdr) +
+ sizeof(mpi_coredump->mpi_core_regs) +
+ sizeof(mpi_coredump->mpi_core_sh_regs),
+ "Core Registers");
+
+ /* Get the MPI Core Registers */
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
+ MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
+ if (status)
+ goto err;
+ /* Get the 16 MPI shadow registers */
+ status = ql_get_mpi_shadow_regs(qdev,
+ &mpi_coredump->mpi_core_sh_regs[0]);
+ if (status)
+ goto err;
+
+ /* Get the Test Logic Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
+ TEST_LOGIC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->test_logic_regs),
+ "Test Logic Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
+ TEST_REGS_ADDR, TEST_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the RMII Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
+ RMII_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->rmii_regs),
+ "RMII Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
+ RMII_REGS_ADDR, RMII_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FCMAC1 Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
+ FCMAC1_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fcmac1_regs),
+ "FCMAC1 Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
+ FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FCMAC2 Registers */
+
+ ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
+ FCMAC2_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fcmac2_regs),
+ "FCMAC2 Registers");
+
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
+ FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FC1 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
+ FC1_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fc1_mbx_regs),
+ "FC1 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
+ FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the IDE Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
+ IDE_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->ide_regs),
+ "IDE Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
+ IDE_REGS_ADDR, IDE_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the NIC1 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
+ NIC1_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic1_mbx_regs),
+ "NIC1 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
+ NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the SMBus Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
+ SMBUS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->smbus_regs),
+ "SMBus Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
+ SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FC2 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
+ FC2_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fc2_mbx_regs),
+ "FC2 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
+ FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the NIC2 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
+ NIC2_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic2_mbx_regs),
+ "NIC2 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
+ NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the I2C Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
+ I2C_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->i2c_regs),
+ "I2C Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
+ I2C_REGS_ADDR, I2C_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the MEMC Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
+ MEMC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->memc_regs),
+ "MEMC Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
+ MEMC_REGS_ADDR, MEMC_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the PBus Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
+ PBUS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->pbus_regs),
+ "PBUS Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
+ PBUS_REGS_ADDR, PBUS_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the MDE Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
+ MDE_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->mde_regs),
+ "MDE Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
+ MDE_REGS_ADDR, MDE_REGS_CNT);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
+ MISC_NIC_INFO_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->misc_nic_info),
+ "MISC NIC INFO");
+ mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
+ mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
+ mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
+ mpi_coredump->misc_nic_info.function = qdev->func;
+
+ /* Segment 31 */
+ /* Get indexed register values. */
+ ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
+ INTR_STATES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->intr_states),
+ "INTR States");
+ ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
+ CAM_ENTRIES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->cam_entries),
+ "CAM Entries");
+ status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
+ ROUTING_WORDS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic_routing_words),
+ "Routing Words");
+ status = ql_get_routing_entries(qdev,
+ &mpi_coredump->nic_routing_words[0]);
+ if (status)
+ goto err;
+
+ /* Segment 34 (Rev C. step 23) */
+ ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
+ ETS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->ets),
+ "ETS Registers");
+ status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
+ PROBE_DUMP_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->probe_dump),
+ "Probe Dump");
+ ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
+ ROUTING_INDEX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->routing_regs),
+ "Routing Regs");
+ status = ql_get_routing_index_registers(qdev,
+ &mpi_coredump->routing_regs[0]);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
+ MAC_PROTOCOL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->mac_prot_regs),
+ "MAC Prot Regs");
+ ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
+
+ /* Get the semaphore registers for all 5 functions */
+ ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
+ SEM_REGS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->sem_regs), "Sem Registers");
+
+ ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
+
+ /* Prevent the mpi restarting while we dump the memory.*/
+ ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
+
+ /* clear the pause */
+ status = ql_unpause_mpi_risc(qdev);
+ if (status) {
+ QPRINTK(qdev, DRV, ERR,
+ "Failed RISC unpause. Status = 0x%.08x\n", status);
+ goto err;
+ }
+
+ /* Reset the RISC so we can dump RAM */
+ status = ql_hard_reset_mpi_risc(qdev);
+ if (status) {
+ QPRINTK(qdev, DRV, ERR,
+ "Failed RISC reset. Status = 0x%.08x\n", status);
+ goto err;
+ }
+
+ ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
+ WCS_RAM_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->code_ram),
+ "WCS RAM");
+ status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
+ CODE_RAM_ADDR, CODE_RAM_CNT);
+ if (status) {
+ QPRINTK(qdev, DRV, ERR,
+ "Failed Dump of CODE RAM. Status = 0x%.08x\n", status);
+ goto err;
+ }
+
+ /* Insert the segment header */
+ ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
+ MEMC_RAM_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->memc_ram),
+ "MEMC RAM");
+ status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
+ MEMC_RAM_ADDR, MEMC_RAM_CNT);
+ if (status) {
+ QPRINTK(qdev, DRV, ERR,
+ "Failed Dump of MEMC RAM. Status = 0x%.08x\n", status);
+ goto err;
+ }
+err:
+ ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
+ return status;
+}
+
+static void ql_get_core_dump(struct ql_adapter *qdev)
+{
+ if (!ql_own_firmware(qdev)) {
+ QPRINTK(qdev, DRV, ERR, "%s: Don't own firmware!\n",
+ qdev->ndev->name);
+ return;
+ }
+
+ if (!netif_running(qdev->ndev)) {
+ QPRINTK(qdev, IFUP, ERR,
+ "Force Coredump can only be done from interface "
+ "that is up.\n");
+ return;
+ }
+
+ if (ql_mb_sys_err(qdev)) {
+ QPRINTK(qdev, IFUP, ERR,
+ "Fail force coredump with ql_mb_sys_err().\n");
+ return;
+ }
+}
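To make the trigger path explicit (a sketch, not part of the patch): ql_get_core_dump() only asks the firmware to raise a system error via ql_mb_sys_err(); the actual ql_core_dump() call happens later from the MPI reset worker, and the register-dump path below only forces it when QL_FRC_COREDUMP was set by the qlge_force_coredump module parameter:

/* Illustrative sketch only - how the forced coredump is requested. */
static void ql_force_coredump_example(struct ql_adapter *qdev)
{
	if (test_bit(QL_FRC_COREDUMP, &qdev->flags))
		ql_get_core_dump(qdev);	/* dump itself lands via ql_mpi_reset_work() */
}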
+
void ql_gen_reg_dump(struct ql_adapter *qdev,
struct ql_reg_dump *mpi_coredump)
{
@@ -178,6 +1319,36 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
if (status)
return;
+
+ if (test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ ql_get_core_dump(qdev);
+}
+
+/* Coredump to messages log file using separate worker thread */
+void ql_mpi_core_to_log(struct work_struct *work)
+{
+ struct ql_adapter *qdev =
+ container_of(work, struct ql_adapter, mpi_core_to_log.work);
+ u32 *tmp, count;
+ int i;
+
+ count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
+ tmp = (u32 *)qdev->mpi_coredump;
+ QPRINTK(qdev, DRV, DEBUG, "Core is dumping to log file!\n");
+
+ for (i = 0; i < count; i += 8) {
+ printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x "
+ "%.08x %.08x %.08x \n", i,
+ tmp[i + 0],
+ tmp[i + 1],
+ tmp[i + 2],
+ tmp[i + 3],
+ tmp[i + 4],
+ tmp[i + 5],
+ tmp[i + 6],
+ tmp[i + 7]);
+ msleep(5);
+ }
}
#ifdef QL_REG_DUMP
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 707b391afa02..3cb60e10d456 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -73,7 +73,19 @@ static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, MSIX_IRQ);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
-static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
+static int qlge_mpi_coredump;
+module_param(qlge_mpi_coredump, int, 0);
+MODULE_PARM_DESC(qlge_mpi_coredump,
+ "Option to enable MPI firmware dump. "
+ "Default is OFF - Do Not allocate memory. ");
+
+static int qlge_force_coredump;
+module_param(qlge_force_coredump, int, 0);
+MODULE_PARM_DESC(qlge_force_coredump,
+ "Option to allow force of firmware core dump. "
+ "Default is OFF - Do not allow.");
+
+static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
/* required last entry */
@@ -452,9 +464,7 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
if (set) {
addr = &qdev->ndev->dev_addr[0];
QPRINTK(qdev, IFUP, DEBUG,
- "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
- addr[0], addr[1], addr[2], addr[3],
- addr[4], addr[5]);
+ "Set Mac addr %pM\n", addr);
} else {
memset(zero_mac_addr, 0, ETH_ALEN);
addr = &zero_mac_addr[0];
@@ -1433,6 +1443,254 @@ map_error:
return NETDEV_TX_BUSY;
}
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct sk_buff *skb;
+ struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct skb_frag_struct *rx_frag;
+ int nr_frags;
+ struct napi_struct *napi = &rx_ring->napi;
+
+ napi->dev = qdev->ndev;
+
+ skb = napi_get_frags(napi);
+ if (!skb) {
+ QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, exiting.\n");
+ rx_ring->rx_dropped++;
+ put_page(lbq_desc->p.pg_chunk.page);
+ return;
+ }
+ prefetch(lbq_desc->p.pg_chunk.va);
+ rx_frag = skb_shinfo(skb)->frags;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ rx_frag += nr_frags;
+ rx_frag->page = lbq_desc->p.pg_chunk.page;
+ rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
+ rx_frag->size = length;
+
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+ skb_shinfo(skb)->nr_frags++;
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += length;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
+ else
+ napi_gro_frags(napi);
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_page(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct net_device *ndev = qdev->ndev;
+ struct sk_buff *skb = NULL;
+ void *addr;
+ struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct napi_struct *napi = &rx_ring->napi;
+
+ skb = netdev_alloc_skb(ndev, length);
+ if (!skb) {
+ QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, "
+ "need to unwind!.\n");
+ rx_ring->rx_dropped++;
+ put_page(lbq_desc->p.pg_chunk.page);
+ return;
+ }
+
+ addr = lbq_desc->p.pg_chunk.va;
+ prefetch(addr);
+
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
+ ib_mac_rsp->flags2);
+ rx_ring->rx_errors++;
+ goto err_out;
+ }
+
+ /* The max framesize filter on this chip is set higher than
+ * MTU since FCoE uses 2k frames.
+ */
+ if (length > ndev->mtu + ETH_HLEN) {
+ QPRINTK(qdev, DRV, ERR, "Frame too long, dropping.\n");
+ rx_ring->rx_dropped++;
+ goto err_out;
+ }
+ memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
+ QPRINTK(qdev, RX_STATUS, DEBUG,
+ "%d bytes of headers and data in large. Chain "
+ "page to new skb and pull tail.\n", length);
+ skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset+ETH_HLEN,
+ length-ETH_HLEN);
+ skb->len += length-ETH_HLEN;
+ skb->data_len += length-ETH_HLEN;
+ skb->truesize += length-ETH_HLEN;
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (qdev->rx_csum &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ /* TCP frame. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+ QPRINTK(qdev, RX_STATUS, DEBUG,
+ "TCP checksum done!\n");
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+ (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+ /* Unfragmented ipv4 UDP frame. */
+ struct iphdr *iph = (struct iphdr *) skb->data;
+ if (!(iph->frag_off &
+ cpu_to_be16(IP_MF|IP_OFFSET))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ QPRINTK(qdev, RX_STATUS, DEBUG,
+ "TCP checksum done!\n");
+ }
+ }
+ }
+
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
+ else
+ napi_gro_receive(napi, skb);
+ } else {
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
+ else
+ netif_receive_skb(skb);
+ }
+ return;
+err_out:
+ dev_kfree_skb_any(skb);
+ put_page(lbq_desc->p.pg_chunk.page);
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct net_device *ndev = qdev->ndev;
+ struct sk_buff *skb = NULL;
+ struct sk_buff *new_skb = NULL;
+ struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
+
+ skb = sbq_desc->p.skb;
+ /* Allocate new_skb and copy */
+ new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
+ if (new_skb == NULL) {
+ QPRINTK(qdev, PROBE, ERR,
+ "No skb available, drop the packet.\n");
+ rx_ring->rx_dropped++;
+ return;
+ }
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ memcpy(skb_put(new_skb, length), skb->data, length);
+ skb = new_skb;
+
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
+ ib_mac_rsp->flags2);
+ dev_kfree_skb_any(skb);
+ rx_ring->rx_errors++;
+ return;
+ }
+
+ /* loopback self test for ethtool */
+ if (test_bit(QL_SELFTEST, &qdev->flags)) {
+ ql_check_lb_frame(qdev, skb);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /* The max framesize filter on this chip is set higher than
+ * MTU since FCoE uses 2k frames.
+ */
+ if (skb->len > ndev->mtu + ETH_HLEN) {
+ dev_kfree_skb_any(skb);
+ rx_ring->rx_dropped++;
+ return;
+ }
+
+ prefetch(skb->data);
+ skb->dev = ndev;
+ if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
+ QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+ }
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
+ QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* If rx checksum is on, and there are no
+ * csum or frame errors.
+ */
+ if (qdev->rx_csum &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ /* TCP frame. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+ QPRINTK(qdev, RX_STATUS, DEBUG,
+ "TCP checksum done!\n");
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+ (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+ /* Unfragmented ipv4 UDP frame. */
+ struct iphdr *iph = (struct iphdr *) skb->data;
+ if (!(iph->frag_off &
+ cpu_to_be16(IP_MF|IP_OFFSET))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ QPRINTK(qdev, RX_STATUS, DEBUG,
+ "TCP checksum done!\n");
+ }
+ }
+ }
+
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
+ vlan_id, skb);
+ else
+ napi_gro_receive(&rx_ring->napi, skb);
+ } else {
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
+ else
+ netif_receive_skb(skb);
+ }
+}
+
static void ql_realign_skb(struct sk_buff *skb, int len)
{
void *temp_addr = skb->data;
@@ -1646,14 +1904,13 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
}
/* Process an inbound completion from an rx ring. */
-static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
+static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp)
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u16 vlan_id)
{
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
- u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
- IB_MAC_IOCB_RSP_VLAN_MASK)
QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
@@ -1753,6 +2010,65 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
}
}
+/* Process an inbound completion from an rx ring. */
+static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+ u32 length = le32_to_cpu(ib_mac_rsp->data_len);
+ u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+ ((le16_to_cpu(ib_mac_rsp->vlan_id) &
+ IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
+
+ QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+
+ if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
+ /* The data and headers are split into
+ * separate buffers.
+ */
+ ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
+ vlan_id);
+ } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
+ /* The data fit in a single small buffer.
+ * Allocate a new skb, copy the data and
+ * return the buffer to the free pool.
+ */
+ ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
+ /* TCP packet in a page chunk that's been checksummed.
+ * Tack it on to our GRO skb and let it go.
+ */
+ ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
+ /* Non-TCP packet in a page chunk. Allocate an
+ * skb, tack it on frags, and send it up.
+ */
+ ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else {
+ struct bq_desc *lbq_desc;
+
+ /* Free small buffer that holds the IAL */
+ lbq_desc = ql_get_curr_sbuf(rx_ring);
+ QPRINTK(qdev, RX_ERR, ERR, "Dropping frame, len %d > mtu %d\n",
+ length, qdev->ndev->mtu);
+
+ /* Unwind the large buffers for this frame. */
+ while (length > 0) {
+ lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ length -= (length < rx_ring->lbq_buf_size) ?
+ length : rx_ring->lbq_buf_size;
+ put_page(lbq_desc->p.pg_chunk.page);
+ }
+ }
+
+ return (unsigned long)length;
+}
+
/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
struct ob_mac_iocb_rsp *mac_rsp)
@@ -3332,15 +3648,15 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
/* Enable the function, set pagesize, enable error checking. */
value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
- FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
+ FSC_EC | FSC_VM_PAGE_4K;
+ value |= SPLT_SETTING;
/* Set/clear header splitting. */
mask = FSC_VM_PAGESIZE_MASK |
FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
ql_write32(qdev, FSC, mask | value);
- ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
- min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
+ ql_write32(qdev, SPLT_HDR, SPLT_LEN);
/* Set RX packet routing to use port/pci function on which the
* packet arrived on in addition to usual frame routing.
@@ -3538,6 +3854,7 @@ static int ql_adapter_down(struct ql_adapter *qdev)
cancel_delayed_work_sync(&qdev->mpi_reset_work);
cancel_delayed_work_sync(&qdev->mpi_work);
cancel_delayed_work_sync(&qdev->mpi_idc_work);
+ cancel_delayed_work_sync(&qdev->mpi_core_to_log);
cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
for (i = 0; i < qdev->rss_ring_count; i++)
@@ -3612,6 +3929,16 @@ static int qlge_close(struct net_device *ndev)
{
struct ql_adapter *qdev = netdev_priv(ndev);
+ /* If we hit pci_channel_io_perm_failure
+ * failure condition, then we already
+ * brought the adapter down.
+ */
+ if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
+ QPRINTK(qdev, DRV, ERR, "EEH fatal did unload.\n");
+ clear_bit(QL_EEH_FATAL, &qdev->flags);
+ return 0;
+ }
+
/*
* Wait for device to recover from a reset.
* (Rarely happens, but possible.)
@@ -3796,21 +4123,18 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
} else if (ndev->mtu == 9000 && new_mtu == 1500) {
QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
- } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
- (ndev->mtu == 9000 && new_mtu == 9000)) {
- return 0;
} else
return -EINVAL;
queue_delayed_work(qdev->workqueue,
&qdev->mpi_port_cfg_work, 3*HZ);
+ ndev->mtu = new_mtu;
+
if (!netif_running(qdev->ndev)) {
- ndev->mtu = new_mtu;
return 0;
}
- ndev->mtu = new_mtu;
status = ql_change_rx_buffers(qdev);
if (status) {
QPRINTK(qdev, IFUP, ERR,
@@ -4094,6 +4418,7 @@ static void ql_release_all(struct pci_dev *pdev)
iounmap(qdev->reg_base);
if (qdev->doorbell_area)
iounmap(qdev->doorbell_area);
+ vfree(qdev->mpi_coredump);
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
}
@@ -4119,7 +4444,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
err = pcie_set_readrq(pdev, 4096);
if (err) {
dev_err(&pdev->dev, "Set readrq failed.\n");
- goto err_out;
+ goto err_out1;
}
err = pci_request_regions(pdev, DRV_NAME);
@@ -4140,7 +4465,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
if (err) {
dev_err(&pdev->dev, "No usable DMA configuration.\n");
- goto err_out;
+ goto err_out2;
}
/* Set PCIe reset type for EEH to fundamental. */
@@ -4152,7 +4477,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
if (!qdev->reg_base) {
dev_err(&pdev->dev, "Register mapping failed.\n");
err = -ENOMEM;
- goto err_out;
+ goto err_out2;
}
qdev->doorbell_area_size = pci_resource_len(pdev, 3);
@@ -4162,24 +4487,35 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
if (!qdev->doorbell_area) {
dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
err = -ENOMEM;
- goto err_out;
+ goto err_out2;
}
err = ql_get_board_info(qdev);
if (err) {
dev_err(&pdev->dev, "Register access failed.\n");
err = -EIO;
- goto err_out;
+ goto err_out2;
}
qdev->msg_enable = netif_msg_init(debug, default_msg);
spin_lock_init(&qdev->hw_lock);
spin_lock_init(&qdev->stats_lock);
+ if (qlge_mpi_coredump) {
+ qdev->mpi_coredump =
+ vmalloc(sizeof(struct ql_mpi_coredump));
+ if (qdev->mpi_coredump == NULL) {
+ dev_err(&pdev->dev, "Coredump alloc failed.\n");
+ err = -ENOMEM;
+ goto err_out2;
+ }
+ if (qlge_force_coredump)
+ set_bit(QL_FRC_COREDUMP, &qdev->flags);
+ }
/* make sure the EEPROM is good */
err = qdev->nic_ops->get_flash(qdev);
if (err) {
dev_err(&pdev->dev, "Invalid FLASH.\n");
- goto err_out;
+ goto err_out2;
}
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
@@ -4204,6 +4540,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
+ INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
init_completion(&qdev->ide_completion);
if (!cards_found) {
@@ -4212,8 +4549,9 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
DRV_NAME, DRV_VERSION);
}
return 0;
-err_out:
+err_out2:
ql_release_all(pdev);
+err_out1:
pci_disable_device(pdev);
return err;
}
@@ -4233,6 +4571,21 @@ static const struct net_device_ops qlge_netdev_ops = {
.ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
};
+static void ql_timer(unsigned long data)
+{
+ struct ql_adapter *qdev = (struct ql_adapter *)data;
+ u32 var = 0;
+
+ var = ql_read32(qdev, STS);
+ if (pci_channel_offline(qdev->pdev)) {
+ QPRINTK(qdev, IFUP, ERR, "EEH STS = 0x%.08x.\n", var);
+ return;
+ }
+
+ qdev->timer.expires = jiffies + (5*HZ);
+ add_timer(&qdev->timer);
+}
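The timer above exists only so that ql_read32() keeps touching the device every five seconds; if the PCI channel has died, that read is what lets EEH notice. Arming it looks like the probe and resume hunks below (a sketch, not part of the patch):

/* Illustrative sketch only - (re)arming the EEH detection timer. */
static void ql_arm_eeh_timer_example(struct ql_adapter *qdev)
{
	qdev->timer.expires = jiffies + (5 * HZ);
	add_timer(&qdev->timer);
}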
+
static int __devinit qlge_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_entry)
{
@@ -4284,6 +4637,14 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
pci_disable_device(pdev);
return err;
}
+ /* Start up the timer to trigger EEH if
+ * the bus goes dead
+ */
+ init_timer_deferrable(&qdev->timer);
+ qdev->timer.data = (unsigned long)qdev;
+ qdev->timer.function = ql_timer;
+ qdev->timer.expires = jiffies + (5*HZ);
+ add_timer(&qdev->timer);
ql_link_off(qdev);
ql_display_dev_info(ndev);
atomic_set(&qdev->lb_count, 0);
@@ -4304,6 +4665,8 @@ int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
static void __devexit qlge_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ del_timer_sync(&qdev->timer);
unregister_netdev(ndev);
ql_release_all(pdev);
pci_disable_device(pdev);
@@ -4326,6 +4689,7 @@ static void ql_eeh_close(struct net_device *ndev)
cancel_delayed_work_sync(&qdev->mpi_reset_work);
cancel_delayed_work_sync(&qdev->mpi_work);
cancel_delayed_work_sync(&qdev->mpi_idc_work);
+ cancel_delayed_work_sync(&qdev->mpi_core_to_log);
cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
for (i = 0; i < qdev->rss_ring_count; i++)
@@ -4345,6 +4709,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
enum pci_channel_state state)
{
struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
switch (state) {
case pci_channel_io_normal:
@@ -4358,6 +4723,8 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
case pci_channel_io_perm_failure:
dev_err(&pdev->dev,
"%s: pci_channel_io_perm_failure.\n", __func__);
+ ql_eeh_close(ndev);
+ set_bit(QL_EEH_FATAL, &qdev->flags);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -4385,6 +4752,13 @@ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
+
+ if (ql_adapter_reset(qdev)) {
+ QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
+ set_bit(QL_EEH_FATAL, &qdev->flags);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
return PCI_ERS_RESULT_RECOVERED;
}
@@ -4394,8 +4768,6 @@ static void qlge_io_resume(struct pci_dev *pdev)
struct ql_adapter *qdev = netdev_priv(ndev);
int err = 0;
- if (ql_adapter_reset(qdev))
- QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
if (netif_running(ndev)) {
err = qlge_open(ndev);
if (err) {
@@ -4407,6 +4779,8 @@ static void qlge_io_resume(struct pci_dev *pdev)
QPRINTK(qdev, IFUP, ERR,
"Device was not running prior to EEH.\n");
}
+ qdev->timer.expires = jiffies + (5*HZ);
+ add_timer(&qdev->timer);
netif_device_attach(ndev);
}
@@ -4423,6 +4797,7 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
int err;
netif_device_detach(ndev);
+ del_timer_sync(&qdev->timer);
if (netif_running(ndev)) {
err = ql_adapter_down(qdev);
@@ -4467,6 +4842,8 @@ static int qlge_resume(struct pci_dev *pdev)
return err;
}
+ qdev->timer.expires = jiffies + (5*HZ);
+ add_timer(&qdev->timer);
netif_device_attach(ndev);
return 0;
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index e2b2286102d4..e2c846f17fc7 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -1,5 +1,54 @@
#include "qlge.h"
+int ql_unpause_mpi_risc(struct ql_adapter *qdev)
+{
+ u32 tmp;
+
+ /* Un-pause the RISC */
+ tmp = ql_read32(qdev, CSR);
+ if (!(tmp & CSR_RP))
+ return -EIO;
+
+ ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
+ return 0;
+}
+
+int ql_pause_mpi_risc(struct ql_adapter *qdev)
+{
+ u32 tmp;
+ int count = UDELAY_COUNT;
+
+ /* Pause the RISC */
+ ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
+ do {
+ tmp = ql_read32(qdev, CSR);
+ if (tmp & CSR_RP)
+ break;
+ mdelay(UDELAY_DELAY);
+ count--;
+ } while (count);
+ return (count == 0) ? -ETIMEDOUT : 0;
+}
+
+int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
+{
+ u32 tmp;
+ int count = UDELAY_COUNT;
+
+ /* Reset the RISC */
+ ql_write32(qdev, CSR, CSR_CMD_SET_RST);
+ do {
+ tmp = ql_read32(qdev, CSR);
+ if (tmp & CSR_RR) {
+ ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
+ break;
+ }
+ mdelay(UDELAY_DELAY);
+ count--;
+ } while (count);
+ return (count == 0) ? -ETIMEDOUT : 0;
+}
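Taken together, the three CSR helpers above are used by the coredump path roughly as follows (a sketch with error handling elided, not part of the patch):

/* Illustrative sketch only - pause, dump, unpause, then hard-reset the RISC. */
static void ql_risc_dump_sequence_example(struct ql_adapter *qdev)
{
	ql_pause_mpi_risc(qdev);	/* stop the firmware while registers are read */
	/* ... read the MPI register segments here ... */
	ql_unpause_mpi_risc(qdev);	/* clear the pause */
	ql_hard_reset_mpi_risc(qdev);	/* required before RISC RAM can be dumped */
}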
+
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
int status;
@@ -45,6 +94,35 @@ int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
return status;
}
+/* Determine if we are in charge of the firmware. We are if
+ * we are the lower of the 2 NIC PCIe functions, or if
+ * we are the higher function and the lower function
+ * is not enabled.
+ */
+int ql_own_firmware(struct ql_adapter *qdev)
+{
+ u32 temp;
+
+ /* If we are the lower of the 2 NIC functions
+ * on the chip then we are responsible for
+ * core dump and firmware reset after an error.
+ */
+ if (qdev->func < qdev->alt_func)
+ return 1;
+
+ /* If we are the higher of the 2 NIC functions
+ * on the chip and the lower function is not
+ * enabled, then we are responsible for
+ * core dump and firmware reset after an error.
+ */
+ temp = ql_read32(qdev, STS);
+ if (!(temp & (1 << (8 + qdev->alt_func))))
+ return 1;
+
+ return 0;
+}
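In other words (a sketch, not part of the patch), ownership reduces to a single expression: the lower-numbered NIC function always owns the firmware, and the higher one owns it only when bit (8 + alt_func) of STS shows the lower function disabled:

/* Illustrative sketch only - equivalent one-liner for ql_own_firmware(). */
static int ql_own_firmware_example(struct ql_adapter *qdev)
{
	return qdev->func < qdev->alt_func ||
	       !(ql_read32(qdev, STS) & (1 << (8 + qdev->alt_func)));
}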
+
static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
int i, status;
@@ -529,6 +607,22 @@ end:
return status;
}
+int ql_mb_sys_err(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 0;
+
+ mbcp->mbox_in[0] = MB_CMD_MAKE_SYS_ERR;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ return status;
+}
/* Get MPI firmware version. This will be used for
* driver banner and for ethtool info.
@@ -669,6 +763,63 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
return status;
}
+int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
+ u32 size)
+{
+ int status = 0;
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 9;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM;
+ mbcp->mbox_in[1] = LSW(addr);
+ mbcp->mbox_in[2] = MSW(req_dma);
+ mbcp->mbox_in[3] = LSW(req_dma);
+ mbcp->mbox_in[4] = MSW(size);
+ mbcp->mbox_in[5] = LSW(size);
+ mbcp->mbox_in[6] = MSW(MSD(req_dma));
+ mbcp->mbox_in[7] = LSW(MSD(req_dma));
+ mbcp->mbox_in[8] = MSW(addr);
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ QPRINTK(qdev, DRV, ERR,
+ "Failed to dump risc RAM.\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+/* Issue a mailbox command to dump RISC RAM. */
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
+ u32 ram_addr, int word_count)
+{
+ int status;
+ char *my_buf;
+ dma_addr_t buf_dma;
+
+ my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
+ &buf_dma);
+ if (!my_buf)
+ return -EIO;
+
+ status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
+ if (!status)
+ memcpy(buf, my_buf, word_count * sizeof(u32));
+
+ pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
+ buf_dma);
+ return status;
+}
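A typical call (not part of the patch) mirrors what ql_core_dump() does for the WCS code RAM segment, using the existing CODE_RAM_ADDR/CODE_RAM_CNT constants:

/* Illustrative sketch only - dumping the firmware code RAM into the coredump. */
static int ql_dump_code_ram_example(struct ql_adapter *qdev,
				    struct ql_mpi_coredump *dump)
{
	return ql_dump_risc_ram_area(qdev, &dump->code_ram[0],
				     CODE_RAM_ADDR, CODE_RAM_CNT);
}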
+
/* Get link settings and maximum frame size settings
* for the current port.
* Most likely will block.
@@ -1143,5 +1294,19 @@ void ql_mpi_reset_work(struct work_struct *work)
cancel_delayed_work_sync(&qdev->mpi_work);
cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
cancel_delayed_work_sync(&qdev->mpi_idc_work);
+ /* If we're not the dominant NIC function,
+ * then there is nothing to do.
+ */
+ if (!ql_own_firmware(qdev)) {
+ QPRINTK(qdev, DRV, ERR, "Don't own firmware!\n");
+ return;
+ }
+
+ if (!ql_core_dump(qdev, qdev->mpi_coredump)) {
+ QPRINTK(qdev, DRV, ERR, "Core is dumped!\n");
+ qdev->core_is_dumped = 1;
+ queue_delayed_work(qdev->workqueue,
+ &qdev->mpi_core_to_log, 5 * HZ);
+ }
ql_soft_reset_mpi_risc(qdev);
}
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index f03e2e4a15a8..d68ba7a58631 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -1222,7 +1222,7 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev)
}
-static struct pci_device_id r6040_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(r6040_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) },
{ 0 }
};
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index c403ce0a3d3a..c1bb24cf0793 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -168,7 +168,7 @@ static void rtl_hw_start_8169(struct net_device *);
static void rtl_hw_start_8168(struct net_device *);
static void rtl_hw_start_8101(struct net_device *);
-static struct pci_device_id rtl8169_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index 20a71749154a..266baf534964 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1293,7 +1293,7 @@ static void rr_dump(struct net_device *dev)
printk("Error code 0x%x\n", readl(&regs->Fail1));
- index = (((readl(&regs->EvtPrd) >> 8) & 0xff ) - 1) % EVT_RING_ENTRIES;
+ index = (((readl(&regs->EvtPrd) >> 8) & 0xff) - 1) % TX_RING_ENTRIES;
cons = rrpriv->dirty_tx;
printk("TX ring index %i, TX consumer %i\n",
index, cons);
@@ -1688,7 +1688,7 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
}
-static struct pci_device_id rr_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rr_pci_tbl) = {
{ PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0,}
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index cc4218667cba..d1664586e8ff 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -523,7 +523,7 @@ module_param_array(rts_frm_len, uint, NULL, 0);
* S2IO device table.
* This table lists all the devices that this driver supports.
*/
-static struct pci_device_id s2io_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
PCI_ANY_ID, PCI_ANY_ID},
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
@@ -3421,7 +3421,7 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
break;
}
} else {
- if (!(val64 & busy_bit)) {
+ if (val64 & busy_bit) {
ret = SUCCESS;
break;
}
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index e35050322f97..fd8cb506a2bb 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -1589,7 +1589,7 @@ out:
return 0;
}
-static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(sc92031_pci_device_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
{ PCI_DEVICE(0x1088, 0x2031) },
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index f983e3b507cc..62d5cd51a9dd 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -741,14 +741,14 @@ static int efx_probe_port(struct efx_nic *efx)
EFX_LOG(efx, "create port\n");
+ if (phy_flash_cfg)
+ efx->phy_mode = PHY_MODE_SPECIAL;
+
/* Connect up MAC/PHY operations table */
rc = efx->type->probe_port(efx);
if (rc)
goto err;
- if (phy_flash_cfg)
- efx->phy_mode = PHY_MODE_SPECIAL;
-
/* Sanity check MAC address */
if (is_valid_ether_addr(efx->mac_address)) {
memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
@@ -1940,7 +1940,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
**************************************************************************/
/* PCI device ID table */
-static struct pci_device_id efx_pci_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
.driver_data = (unsigned long) &falcon_a1_nic_type},
{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index a615ac051530..7eff0a615cb3 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -79,8 +79,6 @@ extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
/* Global */
extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
-extern void efx_suspend(struct efx_nic *efx);
-extern void efx_resume(struct efx_nic *efx);
extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs,
int rx_usecs, bool rx_adaptive);
extern int efx_request_power(struct efx_nic *efx, int mw, const char *name);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 6c0bbed8c477..d9f9c02a928e 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -196,7 +196,7 @@ int efx_ethtool_get_settings(struct net_device *net_dev,
efx->phy_op->get_settings(efx, ecmd);
mutex_unlock(&efx->mac_lock);
- /* Falcon GMAC does not support 1000Mbps HD */
+ /* GMAC does not support 1000Mbps HD */
ecmd->supported &= ~SUPPORTED_1000baseT_Half;
/* Both MACs support pause frames (bidirectional and respond-only) */
ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
@@ -216,7 +216,7 @@ int efx_ethtool_set_settings(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
- /* Falcon GMAC does not support 1000Mbps HD */
+ /* GMAC does not support 1000Mbps HD */
if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) {
EFX_LOG(efx, "rejecting unsupported 1000Mbps HD"
" setting\n");
@@ -342,8 +342,8 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
unsigned int n = 0, i;
enum efx_loopback_mode mode;
- efx_fill_test(n++, strings, data, &tests->mdio,
- "core", 0, "mdio", NULL);
+ efx_fill_test(n++, strings, data, &tests->phy_alive,
+ "phy", 0, "alive", NULL);
efx_fill_test(n++, strings, data, &tests->nvram,
"core", 0, "nvram", NULL);
efx_fill_test(n++, strings, data, &tests->interrupt,
@@ -379,7 +379,7 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
if (name == NULL)
break;
- efx_fill_test(n++, strings, data, &tests->phy[i],
+ efx_fill_test(n++, strings, data, &tests->phy_ext[i],
"phy", 0, name, NULL);
}
}
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 17afcd26e870..1b8d83657aaa 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -909,6 +909,8 @@ static int falcon_probe_port(struct efx_nic *efx)
efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
else
efx->wanted_fc = EFX_FC_RX;
+ if (efx->mdio.mmds & MDIO_DEVS_AN)
+ efx->wanted_fc |= EFX_FC_AUTO;
/* Allocate buffer for stats */
rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
@@ -925,6 +927,7 @@ static int falcon_probe_port(struct efx_nic *efx)
static void falcon_remove_port(struct efx_nic *efx)
{
+ efx->phy_op->remove(efx);
efx_nic_free_buffer(efx, &efx->stats_buffer);
}
@@ -1005,7 +1008,7 @@ static int falcon_test_nvram(struct efx_nic *efx)
static const struct efx_nic_register_test falcon_b0_register_tests[] = {
{ FR_AZ_ADR_REGION,
- EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
+ EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_AZ_RX_CFG,
EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
{ FR_AZ_TX_CFG,
@@ -1727,7 +1730,7 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
/**************************************************************************
*
- * Revision-dependent attributes used by efx.c
+ * Revision-dependent attributes used by efx.c and nic.c
*
**************************************************************************
*/
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 3da933f8f079..8ccab2c67a20 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -111,16 +111,12 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
}
-/* Get status of XAUI link */
-static bool falcon_xaui_link_ok(struct efx_nic *efx)
+static bool falcon_xgxs_link_ok(struct efx_nic *efx)
{
efx_oword_t reg;
bool align_done, link_ok = false;
int sync_status;
- if (LOOPBACK_INTERNAL(efx))
- return true;
-
/* Read link status */
efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
@@ -135,14 +131,24 @@ static bool falcon_xaui_link_ok(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
- /* If the link is up, then check the phy side of the xaui link */
- if (efx->link_state.up && link_ok)
- if (efx->mdio.mmds & (1 << MDIO_MMD_PHYXS))
- link_ok = efx_mdio_phyxgxs_lane_sync(efx);
-
return link_ok;
}
+static bool falcon_xmac_link_ok(struct efx_nic *efx)
+{
+ /*
+ * Check MAC's XGXS link status except when using XGMII loopback
+ * which bypasses the XGXS block.
+ * If possible, check PHY's XGXS link status except when using
+ * MAC loopback.
+ */
+ return (efx->loopback_mode == LOOPBACK_XGMII ||
+ falcon_xgxs_link_ok(efx)) &&
+ (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
+ LOOPBACK_INTERNAL(efx) ||
+ efx_mdio_phyxgxs_lane_sync(efx));
+}
+
void falcon_reconfigure_xmac_core(struct efx_nic *efx)
{
unsigned int max_frame_len;
@@ -245,9 +251,9 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
-static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
+static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
{
- bool mac_up = falcon_xaui_link_ok(efx);
+ bool mac_up = falcon_xmac_link_ok(efx);
if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
efx_phy_mode_disabled(efx->phy_mode))
@@ -261,7 +267,7 @@ static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
falcon_reset_xaui(efx);
udelay(200);
- mac_up = falcon_xaui_link_ok(efx);
+ mac_up = falcon_xmac_link_ok(efx);
--tries;
}
@@ -272,7 +278,7 @@ static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
static bool falcon_xmac_check_fault(struct efx_nic *efx)
{
- return !falcon_check_xaui_link_up(efx, 5);
+ return !falcon_xmac_link_ok_retry(efx, 5);
}
static int falcon_reconfigure_xmac(struct efx_nic *efx)
@@ -284,7 +290,7 @@ static int falcon_reconfigure_xmac(struct efx_nic *efx)
falcon_reconfigure_mac_wrapper(efx);
- efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 5);
+ efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
falcon_mask_status_intr(efx, true);
return 0;
@@ -357,7 +363,7 @@ void falcon_poll_xmac(struct efx_nic *efx)
return;
falcon_mask_status_intr(efx, false);
- efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 1);
+ efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
falcon_mask_status_intr(efx, true);
}
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 683353b904c7..86610db2cff5 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -142,8 +142,9 @@ static int efx_mcdi_poll(struct efx_nic *efx)
if (spins != 0) {
--spins;
udelay(1);
- } else
- schedule();
+ } else {
+ schedule_timeout_uninterruptible(1);
+ }
time = get_seconds();
@@ -803,7 +804,7 @@ int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
loff_t offset, u8 *buffer, size_t length)
{
u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
- u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(length)];
+ u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
size_t outlen;
int rc;
@@ -827,7 +828,7 @@ fail:
int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
loff_t offset, const u8 *buffer, size_t length)
{
- u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(length)];
+ u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
int rc;
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
@@ -837,7 +838,8 @@ int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, sizeof(inbuf),
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
+ ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
NULL, 0, NULL);
if (rc)
goto fail;
@@ -894,29 +896,73 @@ fail:
return rc;
}
-int efx_mcdi_handle_assertion(struct efx_nic *efx)
+static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
+{
+ u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN];
+ u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN];
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+
+ switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
+ case MC_CMD_NVRAM_TEST_PASS:
+ case MC_CMD_NVRAM_TEST_NOTSUPP:
+ return 0;
+ default:
+ return -EIO;
+ }
+}
+
+int efx_mcdi_nvram_test_all(struct efx_nic *efx)
+{
+ u32 nvram_types;
+ unsigned int type;
+ int rc;
+
+ rc = efx_mcdi_nvram_types(efx, &nvram_types);
+ if (rc)
+ return rc;
+
+ type = 0;
+ while (nvram_types != 0) {
+ if (nvram_types & 1) {
+ rc = efx_mcdi_nvram_test(efx, type);
+ if (rc)
+ return rc;
+ }
+ type++;
+ nvram_types >>= 1;
+ }
+
+ return 0;
+}
+
+static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
- union {
- u8 asserts[MC_CMD_GET_ASSERTS_IN_LEN];
- u8 reboot[MC_CMD_REBOOT_IN_LEN];
- } inbuf;
- u8 assertion[MC_CMD_GET_ASSERTS_OUT_LEN];
+ u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN];
+ u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN];
unsigned int flags, index, ofst;
const char *reason;
size_t outlen;
int retry;
int rc;
- /* Check if the MC is in the assertion handler, retrying twice. Once
+ /* Attempt to read any stored assertion state before we reboot
+ * the mcfw out of the assertion handler. Retry twice, once
* because a boot-time assertion might cause this command to fail
* with EINTR. And once again because GET_ASSERTS can race with
* MC_CMD_REBOOT running on the other port. */
retry = 2;
do {
- MCDI_SET_DWORD(inbuf.asserts, GET_ASSERTS_IN_CLEAR, 0);
+ MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
- inbuf.asserts, MC_CMD_GET_ASSERTS_IN_LEN,
- assertion, sizeof(assertion), &outlen);
+ inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
+ outbuf, sizeof(outbuf), &outlen);
} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
if (rc)
@@ -924,21 +970,11 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
return -EINVAL;
- flags = MCDI_DWORD(assertion, GET_ASSERTS_OUT_GLOBAL_FLAGS);
+ /* Print out any recorded assertion state */
+ flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
return 0;
- /* Reset the hardware atomically such that only one port with succeed.
- * This command will succeed if a reboot is no longer required (because
- * the other port did it first), but fail with EIO if it succeeds.
- */
- BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
- MCDI_SET_DWORD(inbuf.reboot, REBOOT_IN_FLAGS,
- MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
- efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf.reboot, MC_CMD_REBOOT_IN_LEN,
- NULL, 0, NULL);
-
- /* Print out the assertion */
reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
? "system-level assertion"
: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
@@ -947,20 +983,45 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
? "watchdog reset"
: "unknown assertion";
EFX_ERR(efx, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
- MCDI_DWORD(assertion, GET_ASSERTS_OUT_SAVED_PC_OFFS),
- MCDI_DWORD(assertion, GET_ASSERTS_OUT_THREAD_OFFS));
+ MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
+ MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
/* Print out the registers */
ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
for (index = 1; index < 32; index++) {
EFX_ERR(efx, "R%.2d (?): 0x%.8x\n", index,
- MCDI_DWORD2(assertion, ofst));
+ MCDI_DWORD2(outbuf, ofst));
ofst += sizeof(efx_dword_t);
}
return 0;
}
+static void efx_mcdi_exit_assertion(struct efx_nic *efx)
+{
+ u8 inbuf[MC_CMD_REBOOT_IN_LEN];
+
+ /* Atomically reboot the mcfw out of the assertion handler */
+ BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
+ MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
+ efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
+ NULL, 0, NULL);
+}
+
+int efx_mcdi_handle_assertion(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_mcdi_read_assertion(efx);
+ if (rc)
+ return rc;
+
+ efx_mcdi_exit_assertion(efx);
+
+ return 0;
+}
+
void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN];
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h
index de916728c2e3..f1f89ad4075a 100644
--- a/drivers/net/sfc/mcdi.h
+++ b/drivers/net/sfc/mcdi.h
@@ -111,10 +111,12 @@ extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
loff_t offset, const u8 *buffer,
size_t length);
+#define EFX_MCDI_NVRAM_LEN_MAX 128
extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
loff_t offset, size_t length);
extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
unsigned int type);
+extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
extern int efx_mcdi_reset_port(struct efx_nic *efx);
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index 2a85360a46f0..bd59302695b3 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -786,16 +786,18 @@
#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
#define MC_CMD_GET_PHY_CFG_PRESENT_LBN 0
#define MC_CMD_GET_PHY_CFG_PRESENT_WIDTH 1
-#define MC_CMD_GET_PHY_CFG_SHORTBIST_LBN 1
-#define MC_CMD_GET_PHY_CFG_SHORTBIST_WIDTH 1
-#define MC_CMD_GET_PHY_CFG_LONGBIST_LBN 2
-#define MC_CMD_GET_PHY_CFG_LONGBIST_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN 1
+#define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN 2
+#define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_WIDTH 1
#define MC_CMD_GET_PHY_CFG_LOWPOWER_LBN 3
#define MC_CMD_GET_PHY_CFG_LOWPOWER_WIDTH 1
#define MC_CMD_GET_PHY_CFG_POWEROFF_LBN 4
#define MC_CMD_GET_PHY_CFG_POWEROFF_WIDTH 1
#define MC_CMD_GET_PHY_CFG_TXDIS_LBN 5
#define MC_CMD_GET_PHY_CFG_TXDIS_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_BIST_LBN 6
+#define MC_CMD_GET_PHY_CFG_BIST_WIDTH 1
#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
/* Bitmask of supported capabilities */
#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
@@ -832,7 +834,7 @@
#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
-/* MC_CMD_START_PHY_BIST:
+/* MC_CMD_START_BIST:
* Start a BIST test on the PHY.
*
* Locks required: PHY_LOCK if doing a PHY BIST
@@ -840,34 +842,71 @@
*/
#define MC_CMD_START_BIST 0x25
#define MC_CMD_START_BIST_IN_LEN 4
-#define MC_CMD_START_BIST_TYPE_OFST 0
+#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define MC_CMD_START_BIST_OUT_LEN 0
-/* Run the PHY's short BIST */
-#define MC_CMD_PHY_BIST_SHORT 1
-/* Run the PHY's long BIST */
-#define MC_CMD_PHY_BIST_LONG 2
+/* Run the PHY's short cable BIST */
+#define MC_CMD_PHY_BIST_CABLE_SHORT 1
+/* Run the PHY's long cable BIST */
+#define MC_CMD_PHY_BIST_CABLE_LONG 2
/* Run BIST on the currently selected BPX Serdes (XAUI or XFI) */
#define MC_CMD_BPX_SERDES_BIST 3
+/* Run the MC loopback tests */
+#define MC_CMD_MC_LOOPBACK_BIST 4
+/* Run the PHY's standard BIST */
+#define MC_CMD_PHY_BIST 5
/* MC_CMD_POLL_PHY_BIST: (variadic output)
* Poll for BIST completion
*
- * Returns a single status code, and a binary blob of phy-specific
- * bist output. If the driver can't succesfully parse the BIST output,
- * it should still respect the Pass/Fail in OUT.RESULT.
+ * Returns a single status code, and optionally some PHY specific
+ * bist output. The driver should only consume the BIST output
+ * after validating OUTLEN and PHY_CFG.PHY_TYPE.
*
- * Locks required: PHY_LOCK if doing a PHY BIST
+ * If a driver can't successfully parse the BIST output, it should
+ * still respect the Pass/Fail in OUT.RESULT.
+ *
+ * Locks required: PHY_LOCK if doing a PHY BIST
* Return code: 0, EACCES (if PHY_LOCK is not held)
*/
#define MC_CMD_POLL_BIST 0x26
#define MC_CMD_POLL_BIST_IN_LEN 0
#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 40
+#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
#define MC_CMD_POLL_BIST_RUNNING 1
#define MC_CMD_POLL_BIST_PASSED 2
#define MC_CMD_POLL_BIST_FAILED 3
#define MC_CMD_POLL_BIST_TIMEOUT 4
+/* Generic: */
#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+/* SFT9001-specific: */
+/* (offset 4 unused?) */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 20
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 24
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 28
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 32
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 36
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 4
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 9
+/* mrsfp "PHY" driver: */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 1
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 2
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 3
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 4
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 5
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 6
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 7
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 8
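As a rough illustration only (not part of this patch, and assuming the sfc driver's mcdi.h/mcdi_pcol.h are available), a driver might poll MC_CMD_POLL_BIST along these lines; the MCDI field token POLL_BIST_OUT_RESULT is inferred from the _OFST define above and the function name is hypothetical:

/* Sketch: poll BIST completion until the result leaves the RUNNING state. */
static int efx_poll_bist_result_sketch(struct efx_nic *efx)
{
	u8 outbuf[MC_CMD_POLL_BIST_OUT_SFT9001_LEN];
	size_t outlen;
	int rc;

	for (;;) {
		/* MC_CMD_POLL_BIST_IN_LEN is 0, so there is no request payload */
		rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < 4)
			return -EMSGSIZE;
		if (MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT) !=
		    MC_CMD_POLL_BIST_RUNNING)
			break;
		msleep(100);
	}

	return MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT) ==
		MC_CMD_POLL_BIST_PASSED ? 0 : -EIO;
}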
/* MC_CMD_PHY_SPI: (variadic in, variadic out)
* Read/Write/Erase the PHY SPI device
@@ -1090,8 +1129,10 @@
#define MC_CMD_MAC_RX_LANES01_DISP_ERR 57
#define MC_CMD_MAC_RX_LANES23_DISP_ERR 58
#define MC_CMD_MAC_RX_MATCH_FAULT 59
+#define MC_CMD_GMAC_DMABUF_START 64
+#define MC_CMD_GMAC_DMABUF_END 95
/* Insert new members here. */
-#define MC_CMD_MAC_GENERATION_END 60
+#define MC_CMD_MAC_GENERATION_END 96
#define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1)
/* MC_CMD_MAC_STATS:
@@ -1204,6 +1245,13 @@
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST \
(MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 178)
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST \
+ MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
+
#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
@@ -1214,7 +1262,8 @@
#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
#define MC_CMD_WOL_TYPE_BITMAP 0x5
-#define MC_CMD_WOL_TYPE_MAX 0x6
+#define MC_CMD_WOL_TYPE_LINK 0x6
+#define MC_CMD_WOL_TYPE_MAX 0x7
#define MC_CMD_FILTER_MODE_SIMPLE 0x0
#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff
@@ -1355,14 +1404,24 @@
* Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
*/
#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
-#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
/* MC_CMD_REBOOT:
- * Reboot the MC. The AFTER_ASSERTION flag is intended to be used
- * when the driver notices an assertion failure, to allow two ports to
- * both recover (semi-)gracefully.
+ * Reboot the MC.
+ *
+ * The AFTER_ASSERTION flag is intended to be used when the driver notices
+ * an assertion failure (at which point it is expected to perform a complete
+ * tear down and reinitialise), to allow both ports to reset the MC once
+ * in an atomic fashion.
+ *
+ * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1,
+ * which means that they will automatically reboot out of the assertion
+ * handler, so this is in practice an optional operation. It is still
+ * recommended that drivers execute this to support custom firmwares
+ * with REBOOT_ON_ASSERT=0.
*
* Locks required: NONE
* Returns: Nothing. You get back a response with ERR=1, DATALEN=0
@@ -1467,11 +1526,10 @@
((_ofst) + 6)
/* MC_CMD_READ_SENSORS
- * Returns the current (value, state) for each sensor
+ * Returns the current reading from each sensor
*
- * Returns the current (value, state) [each 16bit] of each sensor supported by
- * this board, by DMA'ing a sparse array (indexed by the sensor type) into host
- * memory.
+ * DMAs a sparse array of sensor readings (indexed by the sensor
+ * type) into host memory. Each array element is a dword.
*
* The MC will send a SENSOREVT event every time any sensor changes state. The
* driver is responsible for ensuring that it doesn't miss any events. The board
@@ -1484,6 +1542,12 @@
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
#define MC_CMD_READ_SENSORS_OUT_LEN 0
+/* Sensor reading fields */
+#define MC_CMD_READ_SENSOR_VALUE_LBN 0
+#define MC_CMD_READ_SENSOR_VALUE_WIDTH 16
+#define MC_CMD_READ_SENSOR_STATE_LBN 16
+#define MC_CMD_READ_SENSOR_STATE_WIDTH 8
+
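For illustration only (not part of this patch), one reading DMA'd by MC_CMD_READ_SENSORS could be unpacked with the LBN/WIDTH fields above roughly as follows; the helper name is hypothetical:

/* Hypothetical helper: split one sensor dword into its value and state
 * bitfields as laid out by the defines above. */
static inline void efx_decode_sensor_reading_sketch(u32 reading,
						    u16 *value, u8 *state)
{
	*value = (reading >> MC_CMD_READ_SENSOR_VALUE_LBN) &
		 ((1 << MC_CMD_READ_SENSOR_VALUE_WIDTH) - 1);
	*state = (reading >> MC_CMD_READ_SENSOR_STATE_LBN) &
		 ((1 << MC_CMD_READ_SENSOR_STATE_WIDTH) - 1);
}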
/* MC_CMD_GET_PHY_STATE:
* Report current state of PHY. A "zombie" PHY is a PHY that has failed to
@@ -1575,4 +1639,98 @@
#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
+
+/* MC_CMD_TEST_ASSERT:
+ * Deliberately trigger an assert-detonation in the firmware for testing
+ * purposes (i.e. to allow testing that the driver copes gracefully).
+ *
+ * Locks required: None
+ * Returns: 0
+ */
+
+#define MC_CMD_TESTASSERT 0x49
+#define MC_CMD_TESTASSERT_IN_LEN 0
+#define MC_CMD_TESTASSERT_OUT_LEN 0
+
+/* MC_CMD_WORKAROUND 0x4a
+ *
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it
+ * doesn't understand the given workaround number - which should not
+ * be treated as a hard error by client code.
+ *
+ * This op does not imply any semantics about each workaround; that's between
+ * the driver and the mcfw on a per-workaround basis.
+ *
+ * Locks required: None
+ * Returns: 0, EINVAL
+ */
+#define MC_CMD_WORKAROUND 0x4a
+#define MC_CMD_WORKAROUND_IN_LEN 8
+#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define MC_CMD_WORKAROUND_BUG17230 1
+#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+#define MC_CMD_WORKAROUND_OUT_LEN 0
+
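A minimal usage sketch (not part of this patch) of enabling workaround BUG17230 while tolerating firmware that does not recognise it; the MCDI field tokens and the function name are inferred/hypothetical, based on the _OFST defines above:

/* Sketch: ask the MC firmware to enable workaround BUG17230.  EINVAL
 * means the firmware doesn't know this workaround and is deliberately
 * not treated as a hard error. */
static int efx_enable_bug17230_sketch(struct efx_nic *efx)
{
	u8 inbuf[MC_CMD_WORKAROUND_IN_LEN];
	int rc;

	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, MC_CMD_WORKAROUND_BUG17230);
	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, 1);
	rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	return rc == -EINVAL ? 0 : rc;
}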
+/* MC_CMD_GET_PHY_MEDIA_INFO:
+ * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
+ * SFP+ PHYs).
+ *
+ * The "media type" can be found via GET_PHY_CFG (GET_PHY_CFG_OUT_MEDIA_TYPE);
+ * the valid "page number" input values, and the output data, are interpreted
+ * on a per-type basis.
+ *
+ * For SFP+: PAGE=0 or 1 returns a 128-byte block read from module I2C address
+ * 0xA0 offset 0 or 0x80.
+ * Anything else: currently undefined.
+ *
+ * Locks required: None
+ * Return code: 0
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(_num_bytes) (4 + (_num_bytes))
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
+
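Purely as an illustration (not part of this patch), reading the first SFP+ module ID page might look roughly like the sketch below; the field tokens are inferred from the _OFST defines and the function name is hypothetical:

/* Sketch: fetch the 128-byte SFP+ module ID block from page 0. */
static int efx_read_sfp_page0_sketch(struct efx_nic *efx, u8 *data)
{
	u8 inbuf[MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN];
	u8 outbuf[MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(128)];
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO,
			  inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (MCDI_DWORD(outbuf, GET_PHY_MEDIA_INFO_OUT_DATALEN) < 128)
		return -EIO;

	memcpy(data, outbuf + MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST, 128);
	return 0;
}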
+/* MC_CMD_NVRAM_TEST:
+ * Test a particular NVRAM partition for valid contents (where "valid"
+ * depends on the type of partition).
+ *
+ * Locks required: None
+ * Return code: 0
+ */
+#define MC_CMD_NVRAM_TEST 0x4c
+#define MC_CMD_NVRAM_TEST_IN_LEN 4
+#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_TEST_OUT_LEN 4
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+#define MC_CMD_NVRAM_TEST_PASS 0
+#define MC_CMD_NVRAM_TEST_FAIL 1
+#define MC_CMD_NVRAM_TEST_NOTSUPP 2
+
+/* MC_CMD_MRSFP_TWEAK: (debug)
+ * Read status and/or set parameters for the "mrsfp" driver in mr_rusty builds.
+ * I2C I/O expander bits are always read; if equaliser parameters are supplied,
+ * they are configured first.
+ *
+ * Locks required: None
+ * Return code: 0, EINVAL
+ */
+#define MC_CMD_MRSFP_TWEAK 0x4d
+#define MC_CMD_MRSFP_TWEAK_IN_LEN_READ_ONLY 0
+#define MC_CMD_MRSFP_TWEAK_IN_LEN_EQ_CONFIG 16
+#define MC_CMD_MRSFP_TWEAK_IN_TXEQ_LEVEL_OFST 0 /* 0-6 low->high de-emph. */
+#define MC_CMD_MRSFP_TWEAK_IN_TXEQ_DT_CFG_OFST 4 /* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_RXEQ_BOOST_OFST 8 /* 0-8 low->high boost */
+#define MC_CMD_MRSFP_TWEAK_IN_RXEQ_DT_CFG_OFST 12 /* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 /* input bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */
+
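As a hedged sketch (not part of this patch), a read-only MRSFP_TWEAK call that only samples the I/O expander bits could look roughly like this; the field token is inferred from the _OFST define above and the function name is hypothetical:

/* Sketch: read-only MRSFP_TWEAK, returning the I/O expander input bits. */
static int efx_mrsfp_read_ioexp_sketch(struct efx_nic *efx, u32 *inputs)
{
	u8 outbuf[MC_CMD_MRSFP_TWEAK_OUT_LEN];
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_MRSFP_TWEAK,
			  NULL, MC_CMD_MRSFP_TWEAK_IN_LEN_READ_ONLY,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_MRSFP_TWEAK_OUT_LEN)
		return -EMSGSIZE;

	*inputs = MCDI_DWORD(outbuf, MRSFP_TWEAK_OUT_IOEXP_INPUTS);
	return 0;
}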
+/* Do NOT add new commands beyond 0x4f as part of 3.0 : 0x50 - 0x7f will be
+ * used for post-3.0 extensions. If you run out of space, look for gaps or
+ * commands that are unused in the existing range. */
+
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 0e1bcc5a0d52..34c22fa986e2 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -304,31 +304,47 @@ static u32 mcdi_to_ethtool_media(u32 media)
static int efx_mcdi_phy_probe(struct efx_nic *efx)
{
- struct efx_mcdi_phy_cfg *phy_cfg;
+ struct efx_mcdi_phy_cfg *phy_data;
+ u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ u32 caps;
int rc;
- /* TODO: Move phy_data initialisation to
- * phy_op->probe/remove, rather than init/fini */
- phy_cfg = kzalloc(sizeof(*phy_cfg), GFP_KERNEL);
- if (phy_cfg == NULL) {
- rc = -ENOMEM;
- goto fail_alloc;
- }
- rc = efx_mcdi_get_phy_cfg(efx, phy_cfg);
+ /* Initialise and populate phy_data */
+ phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+ if (phy_data == NULL)
+ return -ENOMEM;
+
+ rc = efx_mcdi_get_phy_cfg(efx, phy_data);
if (rc != 0)
goto fail;
- efx->phy_type = phy_cfg->type;
+ /* Read initial link advertisement */
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ goto fail;
- efx->mdio_bus = phy_cfg->channel;
- efx->mdio.prtad = phy_cfg->port;
- efx->mdio.mmds = phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
+ /* Fill out nic state */
+ efx->phy_data = phy_data;
+ efx->phy_type = phy_data->type;
+
+ efx->mdio_bus = phy_data->channel;
+ efx->mdio.prtad = phy_data->port;
+ efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
efx->mdio.mode_support = 0;
- if (phy_cfg->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
+ if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
- if (phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
+ if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
+ if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ efx->link_advertising =
+ mcdi_to_ethtool_cap(phy_data->media, caps);
+ else
+ phy_data->forced_cap = caps;
+
/* Assert that we can map efx -> mcdi loopback modes */
BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE);
BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA);
@@ -365,45 +381,17 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
* but by convention we don't */
efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
- kfree(phy_cfg);
-
- return 0;
-
-fail:
- kfree(phy_cfg);
-fail_alloc:
- return rc;
-}
-
-static int efx_mcdi_phy_init(struct efx_nic *efx)
-{
- struct efx_mcdi_phy_cfg *phy_data;
- u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
- u32 caps;
- int rc;
-
- phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
- if (phy_data == NULL)
- return -ENOMEM;
-
- rc = efx_mcdi_get_phy_cfg(efx, phy_data);
- if (rc != 0)
- goto fail;
-
- efx->phy_data = phy_data;
+ /* Set the initial link mode */
+ efx_mcdi_phy_decode_link(
+ efx, &efx->link_state,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
- BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
- outbuf, sizeof(outbuf), NULL);
- if (rc)
- goto fail;
-
- caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
- if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
- efx->link_advertising =
- mcdi_to_ethtool_cap(phy_data->media, caps);
- else
- phy_data->forced_cap = caps;
+ /* Default to Autonegotiated flow control if the PHY supports it */
+ efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
+ if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ efx->wanted_fc |= EFX_FC_AUTO;
return 0;
@@ -460,7 +448,7 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
/* The link partner capabilities are only relevant if the
* link supports flow control autonegotiation */
- if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
return;
/* If flow control autoneg is supported and enabled, then fine */
@@ -504,7 +492,7 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx)
return !efx_link_state_equal(&efx->link_state, &old_state);
}
-static void efx_mcdi_phy_fini(struct efx_nic *efx)
+static void efx_mcdi_phy_remove(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data = efx->phy_data;
@@ -584,14 +572,37 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
return 0;
}
+static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
+{
+ u8 outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN];
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
+ return -EMSGSIZE;
+ if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
struct efx_phy_operations efx_mcdi_phy_ops = {
.probe = efx_mcdi_phy_probe,
- .init = efx_mcdi_phy_init,
+ .init = efx_port_dummy_op_int,
.reconfigure = efx_mcdi_phy_reconfigure,
.poll = efx_mcdi_phy_poll,
- .fini = efx_mcdi_phy_fini,
+ .fini = efx_port_dummy_op_void,
+ .remove = efx_mcdi_phy_remove,
.get_settings = efx_mcdi_phy_get_settings,
.set_settings = efx_mcdi_phy_set_settings,
+ .test_alive = efx_mcdi_phy_test_alive,
.run_tests = NULL,
.test_name = NULL,
};
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 1574e52f0594..0548fcbbdcd0 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -335,3 +335,27 @@ enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx)
mii_advertise_flowctrl(efx->wanted_fc),
efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
}
+
+int efx_mdio_test_alive(struct efx_nic *efx)
+{
+ int rc;
+ int devad = __ffs(efx->mdio.mmds);
+ u16 physid1, physid2;
+
+ mutex_lock(&efx->mac_lock);
+
+ physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
+ physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);
+
+ if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
+ (physid2 == 0x0000) || (physid2 == 0xffff)) {
+ EFX_ERR(efx, "no MDIO PHY present with ID %d\n",
+ efx->mdio.prtad);
+ rc = -EINVAL;
+ } else {
+ rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
+ }
+
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index f6ac9503339d..f89e71929603 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -106,4 +106,7 @@ efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
mdio_set_flag(&efx->mdio, efx->mdio.prtad, devad, addr, mask, state);
}
+/* Liveness self-test for MDIO PHYs */
+extern int efx_mdio_test_alive(struct efx_nic *efx);
+
#endif /* EFX_MDIO_10G_H */
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index 3a464529a46b..407bbaddfea6 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -23,7 +23,6 @@
#include "mcdi_pcol.h"
#define EFX_SPI_VERIFY_BUF_LEN 16
-#define EFX_MCDI_CHUNK_LEN 128
struct efx_mtd_partition {
struct mtd_info mtd;
@@ -428,7 +427,7 @@ static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
int rc = 0;
while (offset < end) {
- chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN);
+ chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
buffer, chunk);
if (rc)
@@ -491,7 +490,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
}
while (offset < end) {
- chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN);
+ chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
buffer, chunk);
if (rc)
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 34c381f009b7..cb018e272097 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -18,7 +18,6 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
-#include <linux/timer.h>
#include <linux/mdio.h>
#include <linux/list.h>
#include <linux/pci.h>
@@ -101,9 +100,6 @@ do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
* Special buffers are used for the event queues and the TX and RX
* descriptor queues for each channel. They are *not* used for the
* actual transmit and receive buffers.
- *
- * Note that for Falcon, TX and RX descriptor queues live in host memory.
- * Allocation and freeing procedures must take this into account.
*/
struct efx_special_buffer {
void *addr;
@@ -300,7 +296,7 @@ struct efx_rx_queue {
* @dma_addr: DMA base address of the buffer
* @len: Buffer length, in bytes
*
- * Falcon uses these buffers for its interrupt status registers and
+ * The NIC uses these buffers for its interrupt status registers and
* MAC stats dumps.
*/
struct efx_buffer {
@@ -516,14 +512,16 @@ struct efx_mac_operations {
* @set_settings: Set ethtool settings. Serialised by the mac_lock.
* @set_npage_adv: Set abilities advertised in (Extended) Next Page
* (only needed where AN bit is set in mmds)
+ * @test_alive: Test that PHY is 'alive' (online)
* @test_name: Get the name of a PHY-specific test/result
- * @run_tests: Run tests and record results as appropriate.
+ * @run_tests: Run tests and record results as appropriate (offline).
* Flags are the ethtool tests flags.
*/
struct efx_phy_operations {
int (*probe) (struct efx_nic *efx);
int (*init) (struct efx_nic *efx);
void (*fini) (struct efx_nic *efx);
+ void (*remove) (struct efx_nic *efx);
int (*reconfigure) (struct efx_nic *efx);
bool (*poll) (struct efx_nic *efx);
void (*get_settings) (struct efx_nic *efx,
@@ -531,6 +529,7 @@ struct efx_phy_operations {
int (*set_settings) (struct efx_nic *efx,
struct ethtool_cmd *ecmd);
void (*set_npage_adv) (struct efx_nic *efx, u32);
+ int (*test_alive) (struct efx_nic *efx);
const char *(*test_name) (struct efx_nic *efx, unsigned int index);
int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
};
@@ -671,7 +670,7 @@ union efx_multicast_hash {
* @irq_status: Interrupt status buffer
* @last_irq_cpu: Last CPU to handle interrupt.
* This register is written with the SMP processor ID whenever an
- * interrupt is handled. It is used by falcon_test_interrupt()
+ * interrupt is handled. It is used by efx_nic_test_interrupt()
* to verify that an interrupt has occurred.
* @spi_flash: SPI flash device
* This field will be %NULL if no flash device is present (or for Siena).
@@ -720,8 +719,7 @@ union efx_multicast_hash {
* @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state
*
- * The @priv field of the corresponding &struct net_device points to
- * this.
+ * This is stored in the private area of the &struct net_device.
*/
struct efx_nic {
char name[IFNAMSIZ];
@@ -994,7 +992,7 @@ static inline void clear_bit_le(unsigned nr, unsigned char *addr)
* that the net driver will program into the MAC as the maximum frame
* length.
*
- * The 10G MAC used in Falcon requires 8-byte alignment on the frame
+ * The 10G MAC requires 8-byte alignment on the frame
* length, so we round up to the nearest 8.
*
* Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index a577be227862..b06f8e348307 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -623,10 +623,6 @@ void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
*
* This writes the EVQ_RPTR_REG register for the specified channel's
* event queue.
- *
- * Note that EVQ_RPTR_REG contains the index of the "last read" event,
- * whereas channel->eventq_read_ptr contains the index of the "next to
- * read" event.
*/
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
@@ -1384,6 +1380,15 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
efx->last_irq_cpu = raw_smp_processor_id();
EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+ } else if (EFX_WORKAROUND_15783(efx)) {
+ /* We can't return IRQ_HANDLED more than once on seeing ISR0=0
+ * because this might be a shared interrupt, but we do need to
+ * check the channel every time and preemptively rearm it if
+ * it's idle. */
+ efx_for_each_channel(channel, efx) {
+ if (!channel->work_pending)
+ efx_nic_eventq_read_ack(channel);
+ }
}
return result;
@@ -1576,6 +1581,8 @@ void efx_nic_init_common(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
/* Prefetch threshold 2 => fetch when descriptor cache half empty */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
+ /* Disable hardware watchdog which can misfire */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
/* Squash TX of packets of 16 bytes or less */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index 3800fc791b2f..14793d8bd661 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -33,6 +33,9 @@
#define PCS_FW_HEARTBEAT_REG 0xd7ee
#define PCS_FW_HEARTB_LBN 0
#define PCS_FW_HEARTB_WIDTH 8
+#define PCS_FW_PRODUCT_CODE_1 0xd7f0
+#define PCS_FW_VERSION_1 0xd7f3
+#define PCS_FW_BUILD_1 0xd7f6
#define PCS_UC8051_STATUS_REG 0xd7fd
#define PCS_UC_STATUS_LBN 0
#define PCS_UC_STATUS_WIDTH 8
@@ -52,14 +55,24 @@ void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
struct qt202x_phy_data {
enum efx_phy_mode phy_mode;
+ bool bug17190_in_bad_state;
+ unsigned long bug17190_timer;
+ u32 firmware_ver;
};
#define QT2022C2_MAX_RESET_TIME 500
#define QT2022C2_RESET_WAIT 10
-static int qt2025c_wait_reset(struct efx_nic *efx)
+#define QT2025C_MAX_HEARTB_TIME (5 * HZ)
+#define QT2025C_HEARTB_WAIT 100
+#define QT2025C_MAX_FWSTART_TIME (25 * HZ / 10)
+#define QT2025C_FWSTART_WAIT 100
+
+#define BUG17190_INTERVAL (2 * HZ)
+
+static int qt2025c_wait_heartbeat(struct efx_nic *efx)
{
- unsigned long timeout = jiffies + 10 * HZ;
+ unsigned long timeout = jiffies + QT2025C_MAX_HEARTB_TIME;
int reg, old_counter = 0;
/* Wait for firmware heartbeat to start */
@@ -74,11 +87,25 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
old_counter = counter;
else if (counter != old_counter)
break;
- if (time_after(jiffies, timeout))
+ if (time_after(jiffies, timeout)) {
+ /* Some cables have EEPROMs that conflict with the
+ * PHY's on-board EEPROM so it cannot load firmware */
+ EFX_ERR(efx, "If an SFP+ direct attach cable is"
+ " connected, please check that it complies"
+ " with the SFP+ specification\n");
return -ETIMEDOUT;
- msleep(10);
+ }
+ msleep(QT2025C_HEARTB_WAIT);
}
+ return 0;
+}
+
+static int qt2025c_wait_fw_status_good(struct efx_nic *efx)
+{
+ unsigned long timeout = jiffies + QT2025C_MAX_FWSTART_TIME;
+ int reg;
+
/* Wait for firmware status to look good */
for (;;) {
reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG);
@@ -90,7 +117,178 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
break;
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
+ msleep(QT2025C_FWSTART_WAIT);
+ }
+
+ return 0;
+}
+
+static void qt2025c_restart_firmware(struct efx_nic *efx)
+{
+ /* Restart microcontroller execution of firmware from RAM */
+ efx_mdio_write(efx, 3, 0xe854, 0x00c0);
+ efx_mdio_write(efx, 3, 0xe854, 0x0040);
+ msleep(50);
+}
+
+static int qt2025c_wait_reset(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = qt2025c_wait_heartbeat(efx);
+ if (rc != 0)
+ return rc;
+
+ rc = qt2025c_wait_fw_status_good(efx);
+ if (rc == -ETIMEDOUT) {
+ /* Bug 17689: occasionally heartbeat starts but firmware status
+ * code never progresses beyond 0x00. Try again, once, after
+ * restarting execution of the firmware image. */
+ EFX_LOG(efx, "bashing QT2025C microcontroller\n");
+ qt2025c_restart_firmware(efx);
+ rc = qt2025c_wait_heartbeat(efx);
+ if (rc != 0)
+ return rc;
+ rc = qt2025c_wait_fw_status_good(efx);
+ }
+
+ return rc;
+}
+
+static void qt2025c_firmware_id(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data = efx->phy_data;
+ u8 firmware_id[9];
+ size_t i;
+
+ for (i = 0; i < sizeof(firmware_id); i++)
+ firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS,
+ PCS_FW_PRODUCT_CODE_1 + i);
+ EFX_INFO(efx, "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
+ (firmware_id[0] << 8) | firmware_id[1], firmware_id[2],
+ firmware_id[3] >> 4, firmware_id[3] & 0xf,
+ firmware_id[4], firmware_id[5],
+ firmware_id[6], firmware_id[7], firmware_id[8]);
+ phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) |
+ ((firmware_id[3] & 0x0f) << 16) |
+ (firmware_id[4] << 8) | firmware_id[5];
+}
+
+static void qt2025c_bug17190_workaround(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data = efx->phy_data;
+
+ /* The PHY can get stuck in a state where it reports PHY_XS and PMA/PMD
+ * layers up, but PCS down (no block_lock). If we notice this state
+ * persisting for a couple of seconds, we switch PMA/PMD loopback
+ * briefly on and then off again, which is normally sufficient to
+ * recover it.
+ */
+ if (efx->link_state.up ||
+ !efx_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) {
+ phy_data->bug17190_in_bad_state = false;
+ return;
+ }
+
+ if (!phy_data->bug17190_in_bad_state) {
+ phy_data->bug17190_in_bad_state = true;
+ phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
+ return;
+ }
+
+ if (time_after_eq(jiffies, phy_data->bug17190_timer)) {
+ EFX_LOG(efx, "bashing QT2025C PMA/PMD\n");
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_PMA_CTRL1_LOOPBACK, true);
msleep(100);
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_PMA_CTRL1_LOOPBACK, false);
+ phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
+ }
+}
+
+static int qt2025c_select_phy_mode(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data = efx->phy_data;
+ struct falcon_board *board = falcon_board(efx);
+ int reg, rc, i;
+ uint16_t phy_op_mode;
+
+ /* Only 2.0.1.0+ PHY firmware supports the more optimal SFP+
+ * Self-Configure mode. Don't attempt any switching if we encounter
+ * older firmware. */
+ if (phy_data->firmware_ver < 0x02000100)
+ return 0;
+
+ /* In general we will get optimal behaviour in "SFP+ Self-Configure"
+ * mode; however, that powers down most of the PHY when no module is
+ * present, so we must use a different mode (any fixed mode will do)
+ * to be sure that loopbacks will work. */
+ phy_op_mode = (efx->loopback_mode == LOOPBACK_NONE) ? 0x0038 : 0x0020;
+
+ /* Only change mode if really necessary */
+ reg = efx_mdio_read(efx, 1, 0xc319);
+ if ((reg & 0x0038) == phy_op_mode)
+ return 0;
+ EFX_LOG(efx, "Switching PHY to mode 0x%04x\n", phy_op_mode);
+
+ /* This sequence replicates the register writes configured in the boot
+ * EEPROM (including the differences between board revisions), except
+ * that the operating mode is changed, and the PHY is prevented from
+ * unnecessarily reloading the main firmware image again. */
+ efx_mdio_write(efx, 1, 0xc300, 0x0000);
+ /* (Note: this portion of the boot EEPROM sequence, which bit-bashes 9
+ * STOPs onto the firmware/module I2C bus to reset it, varies across
+ * board revisions, as the bus is connected to different GPIO/LED
+ * outputs on the PHY.) */
+ if (board->major == 0 && board->minor < 2) {
+ efx_mdio_write(efx, 1, 0xc303, 0x4498);
+ for (i = 0; i < 9; i++) {
+ efx_mdio_write(efx, 1, 0xc303, 0x4488);
+ efx_mdio_write(efx, 1, 0xc303, 0x4480);
+ efx_mdio_write(efx, 1, 0xc303, 0x4490);
+ efx_mdio_write(efx, 1, 0xc303, 0x4498);
+ }
+ } else {
+ efx_mdio_write(efx, 1, 0xc303, 0x0920);
+ efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ for (i = 0; i < 9; i++) {
+ efx_mdio_write(efx, 1, 0xc303, 0x0900);
+ efx_mdio_write(efx, 1, 0xd008, 0x0005);
+ efx_mdio_write(efx, 1, 0xc303, 0x0920);
+ efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ }
+ efx_mdio_write(efx, 1, 0xc303, 0x4900);
+ }
+ efx_mdio_write(efx, 1, 0xc303, 0x4900);
+ efx_mdio_write(efx, 1, 0xc302, 0x0004);
+ efx_mdio_write(efx, 1, 0xc316, 0x0013);
+ efx_mdio_write(efx, 1, 0xc318, 0x0054);
+ efx_mdio_write(efx, 1, 0xc319, phy_op_mode);
+ efx_mdio_write(efx, 1, 0xc31a, 0x0098);
+ efx_mdio_write(efx, 3, 0x0026, 0x0e00);
+ efx_mdio_write(efx, 3, 0x0027, 0x0013);
+ efx_mdio_write(efx, 3, 0x0028, 0xa528);
+ efx_mdio_write(efx, 1, 0xd006, 0x000a);
+ efx_mdio_write(efx, 1, 0xd007, 0x0009);
+ efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ /* This additional write is not present in the boot EEPROM. It
+ * prevents the PHY's internal boot ROM doing another pointless (and
+ * slow) reload of the firmware image (the microcontroller's code
+ * memory is not affected by the microcontroller reset). */
+ efx_mdio_write(efx, 1, 0xc317, 0x00ff);
+ efx_mdio_write(efx, 1, 0xc300, 0x0002);
+ msleep(20);
+
+ /* Restart microcontroller execution of firmware from RAM */
+ qt2025c_restart_firmware(efx);
+
+ /* Wait for the microcontroller to be ready again */
+ rc = qt2025c_wait_reset(efx);
+ if (rc < 0) {
+ EFX_ERR(efx, "PHY microcontroller reset during mode switch "
+ "timed out\n");
+ return rc;
}
return 0;
@@ -120,12 +318,6 @@ static int qt202x_reset_phy(struct efx_nic *efx)
/* Wait 250ms for the PHY to complete bootup */
msleep(250);
- /* Check that all the MMDs we expect are present and responding. We
- * expect faults on some if the link is down, but not on the PHY XS */
- rc = efx_mdio_check_mmds(efx, QT202X_REQUIRED_DEVS, MDIO_DEVS_PHYXS);
- if (rc < 0)
- goto fail;
-
falcon_board(efx)->type->init_phy(efx);
return rc;
@@ -137,6 +329,16 @@ static int qt202x_reset_phy(struct efx_nic *efx)
static int qt202x_phy_probe(struct efx_nic *efx)
{
+ struct qt202x_phy_data *phy_data;
+
+ phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+ efx->phy_data = phy_data;
+ phy_data->phy_mode = efx->phy_mode;
+ phy_data->bug17190_in_bad_state = false;
+ phy_data->bug17190_timer = 0;
+
efx->mdio.mmds = QT202X_REQUIRED_DEVS;
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
@@ -145,7 +347,6 @@ static int qt202x_phy_probe(struct efx_nic *efx)
static int qt202x_phy_init(struct efx_nic *efx)
{
- struct qt202x_phy_data *phy_data;
u32 devid;
int rc;
@@ -155,17 +356,14 @@ static int qt202x_phy_init(struct efx_nic *efx)
return rc;
}
- phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
- if (!phy_data)
- return -ENOMEM;
- efx->phy_data = phy_data;
-
devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
efx_mdio_id_rev(devid));
- phy_data->phy_mode = efx->phy_mode;
+ if (efx->phy_type == PHY_TYPE_QT2025C)
+ qt2025c_firmware_id(efx);
+
return 0;
}
@@ -183,6 +381,9 @@ static bool qt202x_phy_poll(struct efx_nic *efx)
efx->link_state.fd = true;
efx->link_state.fc = efx->wanted_fc;
+ if (efx->phy_type == PHY_TYPE_QT2025C)
+ qt2025c_bug17190_workaround(efx);
+
return efx->link_state.up != was_up;
}
@@ -191,6 +392,10 @@ static int qt202x_phy_reconfigure(struct efx_nic *efx)
struct qt202x_phy_data *phy_data = efx->phy_data;
if (efx->phy_type == PHY_TYPE_QT2025C) {
+ int rc = qt2025c_select_phy_mode(efx);
+ if (rc)
+ return rc;
+
/* There are several different register bits which can
* disable TX (and save power) on direct-attach cables
* or optical transceivers, varying somewhat between
@@ -224,7 +429,7 @@ static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecm
mdio45_ethtool_gset(&efx->mdio, ecmd);
}
-static void qt202x_phy_fini(struct efx_nic *efx)
+static void qt202x_phy_remove(struct efx_nic *efx)
{
/* Free the context block */
kfree(efx->phy_data);
@@ -236,7 +441,9 @@ struct efx_phy_operations falcon_qt202x_phy_ops = {
.init = qt202x_phy_init,
.reconfigure = qt202x_phy_reconfigure,
.poll = qt202x_phy_poll,
- .fini = qt202x_phy_fini,
+ .fini = efx_port_dummy_op_void,
+ .remove = qt202x_phy_remove,
.get_settings = qt202x_phy_get_settings,
.set_settings = efx_mdio_set_settings,
+ .test_alive = efx_mdio_test_alive,
};
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index af3933579790..cf0139a7d9a4 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -24,9 +24,6 @@
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
-#include "spi.h"
-#include "io.h"
-#include "mdio_10g.h"
/*
* Loopback test packet structure
@@ -76,38 +73,15 @@ struct efx_loopback_state {
*
**************************************************************************/
-static int efx_test_mdio(struct efx_nic *efx, struct efx_self_tests *tests)
+static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
{
int rc = 0;
- int devad = __ffs(efx->mdio.mmds);
- u16 physid1, physid2;
- if (efx->phy_type == PHY_TYPE_NONE)
- return 0;
-
- mutex_lock(&efx->mac_lock);
- tests->mdio = -1;
-
- physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
- physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);
-
- if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
- (physid2 == 0x0000) || (physid2 == 0xffff)) {
- EFX_ERR(efx, "no MDIO PHY present with ID %d\n",
- efx->mdio.prtad);
- rc = -EINVAL;
- goto out;
+ if (efx->phy_op->test_alive) {
+ rc = efx->phy_op->test_alive(efx);
+ tests->phy_alive = rc ? -1 : 1;
}
- if (EFX_IS10G(efx)) {
- rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
- if (rc)
- goto out;
- }
-
-out:
- mutex_unlock(&efx->mac_lock);
- tests->mdio = rc ? -1 : 1;
return rc;
}
@@ -254,7 +228,7 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
return 0;
mutex_lock(&efx->mac_lock);
- rc = efx->phy_op->run_tests(efx, tests->phy, flags);
+ rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
mutex_unlock(&efx->mac_lock);
return rc;
}
@@ -680,7 +654,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
/* Online (i.e. non-disruptive) testing
* This checks interrupt generation, event delivery and PHY presence. */
- rc = efx_test_mdio(efx, tests);
+ rc = efx_test_phy_alive(efx, tests);
if (rc && !rc_test)
rc_test = rc;
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index f6feee04c96b..643bef72b99d 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -32,7 +32,7 @@ struct efx_loopback_self_tests {
*/
struct efx_self_tests {
/* online tests */
- int mdio;
+ int phy_alive;
int nvram;
int interrupt;
int eventq_dma[EFX_MAX_CHANNELS];
@@ -40,7 +40,7 @@ struct efx_self_tests {
int eventq_poll[EFX_MAX_CHANNELS];
/* offline tests */
int registers;
- int phy[EFX_MAX_PHY_TESTS];
+ int phy_ext[EFX_MAX_PHY_TESTS];
struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
};
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index de07a4f031b2..1619fb5a64f5 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -106,16 +106,11 @@ static int siena_probe_port(struct efx_nic *efx)
efx->mdio.mdio_read = siena_mdio_read;
efx->mdio.mdio_write = siena_mdio_write;
- /* Fill out MDIO structure and loopback modes */
+ /* Fill out MDIO structure, loopback modes, and initial link state */
rc = efx->phy_op->probe(efx);
if (rc != 0)
return rc;
- /* Initial assumption */
- efx->link_state.speed = 10000;
- efx->link_state.fd = true;
- efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
-
/* Allocate buffer for stats */
rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
MC_CMD_MAC_NSTATS * sizeof(u64));
@@ -133,12 +128,13 @@ static int siena_probe_port(struct efx_nic *efx)
void siena_remove_port(struct efx_nic *efx)
{
+ efx->phy_op->remove(efx);
efx_nic_free_buffer(efx, &efx->stats_buffer);
}
static const struct efx_nic_register_test siena_register_tests[] = {
{ FR_AZ_ADR_REGION,
- EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
+ EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_CZ_USR_EV_CFG,
EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AZ_RX_CFG,
@@ -180,6 +176,12 @@ static int siena_test_registers(struct efx_nic *efx)
static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
{
+ int rc;
+
+ /* Recover from a failed assertion pre-reset */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
+ return rc;
if (method == RESET_TYPE_WORLD)
return efx_mcdi_reset_mc(efx);
@@ -581,6 +583,7 @@ struct efx_nic_type siena_a0_nic_type = {
.set_wol = siena_set_wol,
.resume_wol = siena_init_wol,
.test_registers = siena_test_registers,
+ .test_nvram = efx_mcdi_nvram_test_all,
.default_mac_ops = &efx_mcdi_mac_operations,
.revision = EFX_REV_SIENA_A0,
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index ca11572a49a9..10db071bd837 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -202,10 +202,14 @@ static ssize_t set_phy_short_reach(struct device *dev,
int rc;
rtnl_lock();
- efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
- MDIO_PMA_10GBT_TXPWR_SHORT,
- count != 0 && *buf != '0');
- rc = efx_reconfigure_port(efx);
+ if (efx->state != STATE_RUNNING) {
+ rc = -EBUSY;
+ } else {
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
+ MDIO_PMA_10GBT_TXPWR_SHORT,
+ count != 0 && *buf != '0');
+ rc = efx_reconfigure_port(efx);
+ }
rtnl_unlock();
return rc < 0 ? rc : (ssize_t)count;
@@ -298,36 +302,62 @@ static int tenxpress_init(struct efx_nic *efx)
return 0;
}
-static int sfx7101_phy_probe(struct efx_nic *efx)
+static int tenxpress_phy_probe(struct efx_nic *efx)
{
- efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
- efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
- efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
- return 0;
-}
+ struct tenxpress_phy_data *phy_data;
+ int rc;
+
+ /* Allocate phy private storage */
+ phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+ efx->phy_data = phy_data;
+ phy_data->phy_mode = efx->phy_mode;
+
+ /* Create any special files */
+ if (efx->phy_type == PHY_TYPE_SFT9001B) {
+ rc = device_create_file(&efx->pci_dev->dev,
+ &dev_attr_phy_short_reach);
+ if (rc)
+ goto fail;
+ }
+
+ if (efx->phy_type == PHY_TYPE_SFX7101) {
+ efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45;
+
+ efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
+
+ efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full);
+ } else {
+ efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+
+ efx->loopback_modes = (SFT9001_LOOPBACKS |
+ FALCON_XMAC_LOOPBACKS |
+ FALCON_GMAC_LOOPBACKS);
+
+ efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Full);
+ }
-static int sft9001_phy_probe(struct efx_nic *efx)
-{
- efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
- efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
- efx->loopback_modes = (SFT9001_LOOPBACKS | FALCON_XMAC_LOOPBACKS |
- FALCON_GMAC_LOOPBACKS);
return 0;
+
+fail:
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
+ return rc;
}
static int tenxpress_phy_init(struct efx_nic *efx)
{
- struct tenxpress_phy_data *phy_data;
- int rc = 0;
+ int rc;
falcon_board(efx)->type->init_phy(efx);
- phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
- if (!phy_data)
- return -ENOMEM;
- efx->phy_data = phy_data;
- phy_data->phy_mode = efx->phy_mode;
-
if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
if (efx->phy_type == PHY_TYPE_SFT9001A) {
int reg;
@@ -341,44 +371,27 @@ static int tenxpress_phy_init(struct efx_nic *efx)
rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
- goto fail;
+ return rc;
rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
if (rc < 0)
- goto fail;
+ return rc;
}
rc = tenxpress_init(efx);
if (rc < 0)
- goto fail;
+ return rc;
- /* Initialise advertising flags */
- efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full);
- if (efx->phy_type != PHY_TYPE_SFX7101)
- efx->link_advertising |= (ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full);
+ /* Reinitialise flow control settings */
efx_link_set_wanted_fc(efx, efx->wanted_fc);
efx_mdio_an_reconfigure(efx);
- if (efx->phy_type == PHY_TYPE_SFT9001B) {
- rc = device_create_file(&efx->pci_dev->dev,
- &dev_attr_phy_short_reach);
- if (rc)
- goto fail;
- }
-
schedule_timeout_uninterruptible(HZ / 5); /* 200ms */
/* Let XGXS and SerDes out of reset */
falcon_reset_xaui(efx);
return 0;
-
- fail:
- kfree(efx->phy_data);
- efx->phy_data = NULL;
- return rc;
}
/* Perform a "special software reset" on the PHY. The caller is
@@ -589,25 +602,26 @@ static bool tenxpress_phy_poll(struct efx_nic *efx)
return !efx_link_state_equal(&efx->link_state, &old_state);
}
-static void tenxpress_phy_fini(struct efx_nic *efx)
+static void sfx7101_phy_fini(struct efx_nic *efx)
{
int reg;
+ /* Power down the LNPGA */
+ reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
+
+ /* Waiting here ensures that the board fini, which can turn
+ * off the power to the PHY, won't get run until the LNPGA
+ * powerdown has been given long enough to complete. */
+ schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
+}
+
+static void tenxpress_phy_remove(struct efx_nic *efx)
+{
if (efx->phy_type == PHY_TYPE_SFT9001B)
device_remove_file(&efx->pci_dev->dev,
&dev_attr_phy_short_reach);
- if (efx->phy_type == PHY_TYPE_SFX7101) {
- /* Power down the LNPGA */
- reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
- efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
-
- /* Waiting here ensures that the board fini, which can turn
- * off the power to the PHY, won't get run until the LNPGA
- * powerdown has been given long enough to complete. */
- schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
- }
-
kfree(efx->phy_data);
efx->phy_data = NULL;
}
@@ -819,27 +833,31 @@ static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising)
}
struct efx_phy_operations falcon_sfx7101_phy_ops = {
- .probe = sfx7101_phy_probe,
+ .probe = tenxpress_phy_probe,
.init = tenxpress_phy_init,
.reconfigure = tenxpress_phy_reconfigure,
.poll = tenxpress_phy_poll,
- .fini = tenxpress_phy_fini,
+ .fini = sfx7101_phy_fini,
+ .remove = tenxpress_phy_remove,
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sfx7101_set_npage_adv,
+ .test_alive = efx_mdio_test_alive,
.test_name = sfx7101_test_name,
.run_tests = sfx7101_run_tests,
};
struct efx_phy_operations falcon_sft9001_phy_ops = {
- .probe = sft9001_phy_probe,
+ .probe = tenxpress_phy_probe,
.init = tenxpress_phy_init,
.reconfigure = tenxpress_phy_reconfigure,
.poll = tenxpress_phy_poll,
- .fini = tenxpress_phy_fini,
+ .fini = efx_port_dummy_op_void,
+ .remove = tenxpress_phy_remove,
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sft9001_set_npage_adv,
+ .test_alive = efx_mdio_test_alive,
.test_name = sft9001_test_name,
.run_tests = sft9001_run_tests,
};
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index e669f94e821b..a8b70ef6d817 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -821,8 +821,6 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
EFX_TXQ_MASK];
efx_tsoh_free(tx_queue, buffer);
EFX_BUG_ON_PARANOID(buffer->skb);
- buffer->len = 0;
- buffer->continuation = true;
if (buffer->unmap_len) {
unmap_addr = (buffer->dma_addr + buffer->len -
buffer->unmap_len);
@@ -836,6 +834,8 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
PCI_DMA_TODEVICE);
buffer->unmap_len = 0;
}
+ buffer->len = 0;
+ buffer->continuation = true;
}
}
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index ed52c0063a65..42a35f086a9f 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -110,7 +110,7 @@ static void sh_eth_reset(struct net_device *ndev)
mdelay(1);
cnt--;
}
- if (cnt < 0)
+ if (cnt == 0)
printk(KERN_ERR "Device reset fail\n");
/* Table Init */
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 31233b4c44a0..626de766443a 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -334,7 +334,7 @@ static const struct {
{ "SiS 191 PCI Gigabit Ethernet adapter" },
};
-static struct pci_device_id sis190_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
{ 0, },
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 7360d4bbf75e..20c5ce474891 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -106,7 +106,7 @@ static const char * card_names[] = {
"SiS 900 PCI Fast Ethernet",
"SiS 7016 PCI Fast Ethernet"
};
-static struct pci_device_id sis900_pci_tbl [] = {
+static DEFINE_PCI_DEVICE_TABLE(sis900_pci_tbl) = {
{PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_900,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_900},
{PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7016,
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index db216a728503..6b955a4f19b2 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -149,7 +149,7 @@ extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
extern void mac_drv_clear_rx_queue(struct s_smc *smc);
extern void enable_tx_irq(struct s_smc *smc, u_short queue);
-static struct pci_device_id skfddi_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(skfddi_pci_tbl) = {
{ PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* Terminating entry */
};
@@ -435,13 +435,7 @@ static int skfp_driver_init(struct net_device *dev)
goto fail;
}
read_address(smc, NULL);
- pr_debug(KERN_INFO "HW-Addr: %02x %02x %02x %02x %02x %02x\n",
- smc->hw.fddi_canon_addr.a[0],
- smc->hw.fddi_canon_addr.a[1],
- smc->hw.fddi_canon_addr.a[2],
- smc->hw.fddi_canon_addr.a[3],
- smc->hw.fddi_canon_addr.a[4],
- smc->hw.fddi_canon_addr.a[5]);
+ pr_debug(KERN_INFO "HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
smt_reset_defaults(smc, 0);
@@ -890,15 +884,8 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
(struct fddi_addr *)dmi->dmi_addr,
1);
- pr_debug(KERN_INFO "ENABLE MC ADDRESS:");
- pr_debug(" %02x %02x %02x ",
- dmi->dmi_addr[0],
- dmi->dmi_addr[1],
- dmi->dmi_addr[2]);
- pr_debug("%02x %02x %02x\n",
- dmi->dmi_addr[3],
- dmi->dmi_addr[4],
- dmi->dmi_addr[5]);
+ pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n",
+ dmi->dmi_addr);
dmi = dmi->next;
} // for
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 379a3dc00163..5ff46eb18d0c 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -78,7 +78,7 @@ static int debug = -1; /* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-static const struct pci_device_id skge_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(skge_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) },
{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 1c01b96c9611..744362272e2d 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -644,6 +644,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
{
u32 reg1;
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
reg1 &= ~phy_power[port];
@@ -651,6 +652,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
reg1 |= coma_mode[port];
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
sky2_pci_read32(hw, PCI_DEV_REG1);
if (hw->chip_id == CHIP_ID_YUKON_FE)
@@ -707,9 +709,11 @@ static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
}
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
/* Force a renegotiation */
@@ -1021,11 +1025,8 @@ static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot)
{
struct sky2_tx_le *le = sky2->tx_le + *slot;
- struct tx_ring_info *re = sky2->tx_ring + *slot;
*slot = RING_NEXT(*slot, sky2->tx_ring_size);
- re->flags = 0;
- re->skb = NULL;
le->ctrl = 0;
return le;
}
@@ -1102,18 +1103,39 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
int i;
re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(pdev, re->data_addr)))
- return -EIO;
+ if (pci_dma_mapping_error(pdev, re->data_addr))
+ goto mapping_error;
pci_unmap_len_set(re, data_size, size);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- re->frag_addr[i] = pci_map_page(pdev,
- skb_shinfo(skb)->frags[i].page,
- skb_shinfo(skb)->frags[i].page_offset,
- skb_shinfo(skb)->frags[i].size,
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ re->frag_addr[i] = pci_map_page(pdev, frag->page,
+ frag->page_offset,
+ frag->size,
PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(pdev, re->frag_addr[i]))
+ goto map_page_error;
+ }
return 0;
+
+map_page_error:
+ while (--i >= 0) {
+ pci_unmap_page(pdev, re->frag_addr[i],
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_FROMDEVICE);
+ }
+
+ pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
+ PCI_DMA_FROMDEVICE);
+
+mapping_error:
+ if (net_ratelimit())
+ dev_warn(&pdev->dev, "%s: rx mapping error\n",
+ skb->dev->name);
+ return -EIO;
}
static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
@@ -1618,8 +1640,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
return count;
}
-static void sky2_tx_unmap(struct pci_dev *pdev,
- const struct tx_ring_info *re)
+static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
{
if (re->flags & TX_MAP_SINGLE)
pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
@@ -1629,6 +1650,7 @@ static void sky2_tx_unmap(struct pci_dev *pdev,
pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
pci_unmap_len(re, maplen),
PCI_DMA_TODEVICE);
+ re->flags = 0;
}
/*
@@ -1835,6 +1857,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
+ re->skb = NULL;
dev_kfree_skb_any(skb);
sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
@@ -1844,7 +1867,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
sky2->tx_cons = idx;
smp_mb();
- if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
+ /* Wake unless it's detached, and called e.g. from sky2_down() */
+ if (tx_avail(sky2) > MAX_SKB_TX_LE + 4 && netif_device_present(dev))
netif_wake_queue(dev);
}
@@ -2148,7 +2172,9 @@ static void sky2_qlink_intr(struct sky2_hw *hw)
/* reset PHY Link Detect */
phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
sky2_link_up(sky2);
}
@@ -2301,30 +2327,32 @@ static struct sk_buff *receive_new(struct sky2_port *sky2,
struct rx_ring_info *re,
unsigned int length)
{
- struct sk_buff *skb, *nskb;
+ struct sk_buff *skb;
+ struct rx_ring_info nre;
unsigned hdr_space = sky2->rx_data_size;
- /* Don't be tricky about reusing pages (yet) */
- nskb = sky2_rx_alloc(sky2);
- if (unlikely(!nskb))
- return NULL;
+ nre.skb = sky2_rx_alloc(sky2);
+ if (unlikely(!nre.skb))
+ goto nobuf;
+
+ if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space))
+ goto nomap;
skb = re->skb;
sky2_rx_unmap_skb(sky2->hw->pdev, re);
-
prefetch(skb->data);
- re->skb = nskb;
- if (sky2_rx_map_skb(sky2->hw->pdev, re, hdr_space)) {
- dev_kfree_skb(nskb);
- re->skb = skb;
- return NULL;
- }
+ *re = nre;
if (skb_shinfo(skb)->nr_frags)
skb_put_frags(skb, hdr_space, length);
else
skb_put(skb, length);
return skb;
+
+nomap:
+ dev_kfree_skb(nre.skb);
+nobuf:
+ return NULL;
}
/*
@@ -2376,6 +2404,9 @@ okay:
skb = receive_copy(sky2, re, length);
else
skb = receive_new(sky2, re, length);
+
+ dev->stats.rx_dropped += (skb == NULL);
+
resubmit:
sky2_rx_submit(sky2, re);
@@ -2487,11 +2518,10 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
case OP_RXSTAT:
total_packets[port]++;
total_bytes[port] += length;
+
skb = sky2_receive(dev, length, status);
- if (unlikely(!skb)) {
- dev->stats.rx_dropped++;
+ if (!skb)
break;
- }
/* This chip reports checksum status differently */
if (hw->flags & SKY2_HW_NEW_LE) {
@@ -2639,6 +2669,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
u16 pci_err;
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
pci_err = sky2_pci_read16(hw, PCI_STATUS);
if (net_ratelimit())
dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
@@ -2646,12 +2677,14 @@ static void sky2_hw_intr(struct sky2_hw *hw)
sky2_pci_write16(hw, PCI_STATUS,
pci_err | PCI_STATUS_ERROR_BITS);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
if (status & Y2_IS_PCI_EXP) {
/* PCI-Express uncorrectable Error occurred */
u32 err;
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
0xfffffffful);
@@ -2659,6 +2692,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
if (status & Y2_HWE_L1_MASK)
@@ -3037,6 +3071,7 @@ static void sky2_reset(struct sky2_hw *hw)
}
sky2_power_on(hw);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
for (i = 0; i < hw->ports; i++) {
sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
@@ -3073,6 +3108,7 @@ static void sky2_reset(struct sky2_hw *hw)
reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
/* reset PHY Link Detect */
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
sky2_pci_write16(hw, PSM_CONFIG_REG4,
reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT);
sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
@@ -3090,6 +3126,7 @@ static void sky2_reset(struct sky2_hw *hw)
/* restore the PCIe Link Control register */
sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
}
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
/* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
@@ -3176,7 +3213,9 @@ static void sky2_reset(struct sky2_hw *hw)
static void sky2_detach(struct net_device *dev)
{
if (netif_running(dev)) {
+ netif_tx_lock(dev);
netif_device_detach(dev); /* stop txq */
+ netif_tx_unlock(dev);
sky2_down(dev);
}
}
@@ -3227,6 +3266,27 @@ static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
}
+static void sky2_hw_set_wol(struct sky2_hw *hw)
+{
+ int wol = 0;
+ int i;
+
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (sky2->wol)
+ wol = 1;
+ }
+
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
+ hw->chip_id == CHIP_ID_YUKON_EX ||
+ hw->chip_id == CHIP_ID_YUKON_FE_P)
+ sky2_write32(hw, B0_CTST, wol ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
+
+ device_set_wakeup_enable(&hw->pdev->dev, wol);
+}
+
static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
const struct sky2_port *sky2 = netdev_priv(dev);
@@ -3246,13 +3306,7 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
sky2->wol = wol->wolopts;
- if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
- hw->chip_id == CHIP_ID_YUKON_EX ||
- hw->chip_id == CHIP_ID_YUKON_FE_P)
- sky2_write32(hw, B0_CTST, sky2->wol
- ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
-
- device_set_wakeup_enable(&hw->pdev->dev, sky2->wol);
+ sky2_hw_set_wol(hw);
if (!netif_running(dev))
sky2_wol_init(sky2);
@@ -3837,6 +3891,50 @@ static int sky2_get_regs_len(struct net_device *dev)
return 0x4000;
}
+static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b)
+{
+	/* This complicated switch statement makes sure we only
+	 * access regions that are unreserved.
+ * Some blocks are only valid on dual port cards.
+ */
+ switch (b) {
+ /* second port */
+ case 5: /* Tx Arbiter 2 */
+ case 9: /* RX2 */
+ case 14 ... 15: /* TX2 */
+ case 17: case 19: /* Ram Buffer 2 */
+ case 22 ... 23: /* Tx Ram Buffer 2 */
+ case 25: /* Rx MAC Fifo 1 */
+ case 27: /* Tx MAC Fifo 2 */
+ case 31: /* GPHY 2 */
+ case 40 ... 47: /* Pattern Ram 2 */
+ case 52: case 54: /* TCP Segmentation 2 */
+ case 112 ... 116: /* GMAC 2 */
+ return hw->ports > 1;
+
+ case 0: /* Control */
+ case 2: /* Mac address */
+ case 4: /* Tx Arbiter 1 */
+ case 7: /* PCI express reg */
+ case 8: /* RX1 */
+ case 12 ... 13: /* TX1 */
+ case 16: case 18:/* Rx Ram Buffer 1 */
+ case 20 ... 21: /* Tx Ram Buffer 1 */
+ case 24: /* Rx MAC Fifo 1 */
+ case 26: /* Tx MAC Fifo 1 */
+ case 28 ... 29: /* Descriptor and status unit */
+ case 30: /* GPHY 1*/
+ case 32 ... 39: /* Pattern Ram 1 */
+ case 48: case 50: /* TCP Segmentation 1 */
+ case 56 ... 60: /* PCI space */
+ case 80 ... 84: /* GMAC 1 */
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
/*
* Returns copy of control register region
* Note: ethtool_get_regs always provides full size (16k) buffer
@@ -3851,55 +3949,13 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs->version = 1;
for (b = 0; b < 128; b++) {
- /* This complicated switch statement is to make sure and
- * only access regions that are unreserved.
- * Some blocks are only valid on dual port cards.
- * and block 3 has some special diagnostic registers that
- * are poison.
- */
- switch (b) {
- case 3:
- /* skip diagnostic ram region */
+ /* skip poisonous diagnostic ram region in block 3 */
+ if (b == 3)
memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
- break;
-
- /* dual port cards only */
- case 5: /* Tx Arbiter 2 */
- case 9: /* RX2 */
- case 14 ... 15: /* TX2 */
- case 17: case 19: /* Ram Buffer 2 */
- case 22 ... 23: /* Tx Ram Buffer 2 */
- case 25: /* Rx MAC Fifo 1 */
- case 27: /* Tx MAC Fifo 2 */
- case 31: /* GPHY 2 */
- case 40 ... 47: /* Pattern Ram 2 */
- case 52: case 54: /* TCP Segmentation 2 */
- case 112 ... 116: /* GMAC 2 */
- if (sky2->hw->ports == 1)
- goto reserved;
- /* fall through */
- case 0: /* Control */
- case 2: /* Mac address */
- case 4: /* Tx Arbiter 1 */
- case 7: /* PCI express reg */
- case 8: /* RX1 */
- case 12 ... 13: /* TX1 */
- case 16: case 18:/* Rx Ram Buffer 1 */
- case 20 ... 21: /* Tx Ram Buffer 1 */
- case 24: /* Rx MAC Fifo 1 */
- case 26: /* Tx MAC Fifo 1 */
- case 28 ... 29: /* Descriptor and status unit */
- case 30: /* GPHY 1*/
- case 32 ... 39: /* Pattern Ram 1 */
- case 48: case 50: /* TCP Segmentation 1 */
- case 56 ... 60: /* PCI space */
- case 80 ... 84: /* GMAC 1 */
+ else if (sky2_reg_access_ok(sky2->hw, b))
memcpy_fromio(p, io, 128);
- break;
- default:
-reserved:
+ else
memset(p, 0, 128);
- }
p += 128;
io += 128;
@@ -4684,6 +4740,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
INIT_WORK(&hw->restart_work, sky2_restart);
pci_set_drvdata(pdev, hw);
+ pdev->d3_delay = 150;
return 0;
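A pattern repeated throughout the sky2.c hunks above is bracketing PCI configuration-space writes with TST_CFG_WRITE_ON / TST_CFG_WRITE_OFF writes to B2_TST_CTRL1, which unlock and re-lock the chip's config registers. A hypothetical helper (not a function in the driver, shown only to make the sequence explicit):

	/* illustrative only: the driver open-codes this bracketing */
	static void sky2_cfg_write16(struct sky2_hw *hw, unsigned reg, u16 val)
	{
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);	/* unlock */
		sky2_pci_write16(hw, reg, val);
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);	/* re-lock */
	}

Keeping the bracketing open-coded lets longer sequences, such as the ones in sky2_reset(), unlock once around several register accesses instead of toggling per write.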
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 365d79c7d834..54cb303443e0 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2156,7 +2156,7 @@ struct tx_ring_info {
struct sk_buff *skb;
unsigned long flags;
#define TX_MAP_SINGLE 0x0001
-#define TX_MAP_PAGE 000002
+#define TX_MAP_PAGE 0x0002
DECLARE_PCI_UNMAP_ADDR(mapaddr);
DECLARE_PCI_UNMAP_LEN(maplen);
};
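The TX_MAP_PAGE change above is purely notational: 000002 is an octal literal and already equals 0x0002, so driver behaviour does not change. The hex form just removes a trap that octal constants set for later edits:

	/* 000002 (octal) == 2 == 0x0002, so the old define was harmless,
	 * but octal literals are easy to misread, e.g.:
	 *   000010 (octal) == 8, not 10 or 16 */
	#define TX_MAP_SINGLE	0x0001
	#define TX_MAP_PAGE	0x0002	/* now matches the hex style above */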
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index 12f0f5d74e3c..1495a5dd4b46 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -80,7 +80,7 @@ struct smsc9420_pdata {
int last_carrier;
};
-static const struct pci_device_id smsc9420_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(smsc9420_id_table) = {
{ PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }
};
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 218524857bfc..16191998ac67 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -72,7 +72,7 @@ MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
char spider_net_driver_name[] = "spidernet";
-static struct pci_device_id spider_net_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(spider_net_pci_tbl) = {
{ PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ 0, }
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 95db60adde41..d0556a9b456c 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -301,7 +301,7 @@ enum chipset {
CH_6915 = 0,
};
-static struct pci_device_id starfire_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(starfire_pci_tbl) = {
{ 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
{ 0, }
};
@@ -1063,7 +1063,7 @@ static int netdev_open(struct net_device *dev)
if (retval) {
printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
FIRMWARE_RX);
- return retval;
+ goto out_init;
}
if (fw_rx->size % 4) {
printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
@@ -1108,6 +1108,9 @@ out_tx:
release_firmware(fw_tx);
out_rx:
release_firmware(fw_rx);
+out_init:
+ if (retval)
+ netdev_close(dev);
return retval;
}
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
index 35eaa5251d7f..fb287649a305 100644
--- a/drivers/net/stmmac/Kconfig
+++ b/drivers/net/stmmac/Kconfig
@@ -4,8 +4,9 @@ config STMMAC_ETH
select PHYLIB
depends on NETDEVICES && CPU_SUBTYPE_ST40
help
- This is the driver for the ST MAC 10/100/1000 on-chip Ethernet
- controllers. ST Ethernet IPs are built around a Synopsys IP Core.
+	  This is the driver for the Ethernet IPs built around a
+	  Synopsys IP Core and fully tested on the STMicroelectronics
+	  platforms.
if STMMAC_ETH
@@ -32,7 +33,8 @@ config STMMAC_TIMER
default n
help
Use an external timer for mitigating the number of network
- interrupts.
+ interrupts. Currently, for SH architectures, it is possible
+ to use the TMU channel 2 and the SH-RTC device.
choice
prompt "Select Timer device"
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
index b2d7a5564dfa..c776af15fe1a 100644
--- a/drivers/net/stmmac/Makefile
+++ b/drivers/net/stmmac/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
-stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
- mac100.o gmac.o $(stmmac-y)
+stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
+ dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
+ dwmac100.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index e49e5188e887..2a58172e986a 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -23,132 +23,7 @@
*******************************************************************************/
#include "descs.h"
-#include <linux/io.h>
-
-/* *********************************************
- DMA CRS Control and Status Register Mapping
- * *********************************************/
-#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
-#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
-#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
-#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
-#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
-#define DMA_STATUS 0x00001014 /* Status Register */
-#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
-#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
-#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
-#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
-#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
-
-/* ********************************
- DMA Control register defines
- * ********************************/
-#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
-#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
-
-/* **************************************
- DMA Interrupt Enable register defines
- * **************************************/
-/**** NORMAL INTERRUPT ****/
-#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
-#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
-#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
-#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
-#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
-
-#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
- DMA_INTR_ENA_TIE)
-
-/**** ABNORMAL INTERRUPT ****/
-#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
-#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
-#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
-#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
-#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
-#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
-#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
-#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
-#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
-#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
-
-#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
- DMA_INTR_ENA_UNE)
-
-/* DMA default interrupt mask */
-#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
-
-/* ****************************
- * DMA Status register defines
- * ****************************/
-#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
-#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
-#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int. */
-#define DMA_STATUS_GMI 0x08000000
-#define DMA_STATUS_GLI 0x04000000
-#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
-#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
-#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
-#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
-#define DMA_STATUS_TS_SHIFT 20
-#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
-#define DMA_STATUS_RS_SHIFT 17
-#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
-#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
-#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
-#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
-#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
-#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
-#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
-#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
-#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
-#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
-#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
-#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
-#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
-#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
-#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
-
-/* Other defines */
-#define HASH_TABLE_SIZE 64
-#define PAUSE_TIME 0x200
-
-/* Flow Control defines */
-#define FLOW_OFF 0
-#define FLOW_RX 1
-#define FLOW_TX 2
-#define FLOW_AUTO (FLOW_TX | FLOW_RX)
-
-/* DMA STORE-AND-FORWARD Operation Mode */
-#define SF_DMA_MODE 1
-
-#define HW_CSUM 1
-#define NO_HW_CSUM 0
-
-/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
-#define BUF_SIZE_16KiB 16384
-#define BUF_SIZE_8KiB 8192
-#define BUF_SIZE_4KiB 4096
-#define BUF_SIZE_2KiB 2048
-
-/* Power Down and WOL */
-#define PMT_NOT_SUPPORTED 0
-#define PMT_SUPPORTED 1
-
-/* Common MAC defines */
-#define MAC_CTRL_REG 0x00000000 /* MAC Control */
-#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
-#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
-
-/* MAC Management Counters register */
-#define MMC_CONTROL 0x00000100 /* MMC Control */
-#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
-#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
-#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
-#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
-
-#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
-#define MMC_CONTROL_MAX_FRM_SHIFT 3
-#define MMC_CONTROL_MAX_FRAME 0x7FF
+#include <linux/netdevice.h>
struct stmmac_extra_stats {
/* Transmit errors */
@@ -169,7 +44,7 @@ struct stmmac_extra_stats {
unsigned long rx_toolong;
unsigned long rx_collision;
unsigned long rx_crc;
- unsigned long rx_lenght;
+ unsigned long rx_length;
unsigned long rx_mii;
unsigned long rx_multicast;
unsigned long rx_gmac_overflow;
@@ -198,66 +73,62 @@ struct stmmac_extra_stats {
unsigned long normal_irq_n;
};
-/* GMAC core can compute the checksums in HW. */
-enum rx_frame_status {
+#define HASH_TABLE_SIZE 64
+#define PAUSE_TIME 0x200
+
+/* Flow Control defines */
+#define FLOW_OFF 0
+#define FLOW_RX 1
+#define FLOW_TX 2
+#define FLOW_AUTO (FLOW_TX | FLOW_RX)
+
+#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
+
+#define HW_CSUM 1
+#define NO_HW_CSUM 0
+enum rx_frame_status { /* IPC status */
good_frame = 0,
discard_frame = 1,
csum_none = 2,
};
-static inline void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
- unsigned int high, unsigned int low)
-{
- unsigned long data;
-
- data = (addr[5] << 8) | addr[4];
- writel(data, ioaddr + high);
- data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
- writel(data, ioaddr + low);
+enum tx_dma_irq_status {
+ tx_hard_error = 1,
+ tx_hard_error_bump_tc = 2,
+ handle_tx_rx = 3,
+};
- return;
-}
+/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
+#define BUF_SIZE_16KiB 16384
+#define BUF_SIZE_8KiB 8192
+#define BUF_SIZE_4KiB 4096
+#define BUF_SIZE_2KiB 2048
-static inline void stmmac_get_mac_addr(unsigned long ioaddr,
- unsigned char *addr, unsigned int high,
- unsigned int low)
-{
- unsigned int hi_addr, lo_addr;
+/* Power Down and WOL */
+#define PMT_NOT_SUPPORTED 0
+#define PMT_SUPPORTED 1
- /* Read the MAC address from the hardware */
- hi_addr = readl(ioaddr + high);
- lo_addr = readl(ioaddr + low);
+/* Common MAC defines */
+#define MAC_CTRL_REG 0x00000000 /* MAC Control */
+#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
+#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
- /* Extract the MAC address from the high and low words */
- addr[0] = lo_addr & 0xff;
- addr[1] = (lo_addr >> 8) & 0xff;
- addr[2] = (lo_addr >> 16) & 0xff;
- addr[3] = (lo_addr >> 24) & 0xff;
- addr[4] = hi_addr & 0xff;
- addr[5] = (hi_addr >> 8) & 0xff;
+/* MAC Management Counters register */
+#define MMC_CONTROL 0x00000100 /* MMC Control */
+#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
+#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
+#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
+#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
- return;
-}
+#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
+#define MMC_CONTROL_MAX_FRM_SHIFT 3
+#define MMC_CONTROL_MAX_FRAME 0x7FF
-struct stmmac_ops {
- /* MAC core initialization */
- void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
- /* DMA core initialization */
- int (*dma_init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
- /* Dump MAC registers */
- void (*dump_mac_regs) (unsigned long ioaddr);
- /* Dump DMA registers */
- void (*dump_dma_regs) (unsigned long ioaddr);
- /* Set tx/rx threshold in the csr6 register
- * An invalid value enables the store-and-forward mode */
- void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
- /* To track extra statistic (if supported) */
- void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
- unsigned long ioaddr);
- /* RX descriptor ring initialization */
+struct stmmac_desc_ops {
+ /* DMA RX descriptor ring initialization */
void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
- int disable_rx_ic);
- /* TX descriptor ring initialization */
+ int disable_rx_ic);
+ /* DMA TX descriptor ring initialization */
void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
/* Invoked by the xmit function to prepare the tx descriptor */
@@ -281,7 +152,6 @@ struct stmmac_ops {
/* Get the buffer size from the descriptor */
int (*get_tx_len) (struct dma_desc *p);
/* Handle extra events on specific interrupts hw dependent */
- void (*host_irq_status) (unsigned long ioaddr);
int (*get_rx_owner) (struct dma_desc *p);
void (*set_rx_owner) (struct dma_desc *p);
/* Get the receive frame size */
@@ -289,6 +159,37 @@ struct stmmac_ops {
/* Return the reception status looking at the RDES1 */
int (*rx_status) (void *data, struct stmmac_extra_stats *x,
struct dma_desc *p);
+};
+
+struct stmmac_dma_ops {
+ /* DMA core initialization */
+ int (*init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
+ /* Dump DMA registers */
+ void (*dump_regs) (unsigned long ioaddr);
+ /* Set tx/rx threshold in the csr6 register
+ * An invalid value enables the store-and-forward mode */
+ void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
+ /* To track extra statistic (if supported) */
+ void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
+ unsigned long ioaddr);
+ void (*enable_dma_transmission) (unsigned long ioaddr);
+ void (*enable_dma_irq) (unsigned long ioaddr);
+ void (*disable_dma_irq) (unsigned long ioaddr);
+ void (*start_tx) (unsigned long ioaddr);
+ void (*stop_tx) (unsigned long ioaddr);
+ void (*start_rx) (unsigned long ioaddr);
+ void (*stop_rx) (unsigned long ioaddr);
+ int (*dma_interrupt) (unsigned long ioaddr,
+ struct stmmac_extra_stats *x);
+};
+
+struct stmmac_ops {
+ /* MAC core initialization */
+ void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
+ /* Dump MAC registers */
+ void (*dump_regs) (unsigned long ioaddr);
+ /* Handle extra events on specific interrupts hw dependent */
+ void (*host_irq_status) (unsigned long ioaddr);
/* Multicast filter setting */
void (*set_filter) (struct net_device *dev);
/* Flow control setting */
@@ -298,9 +199,9 @@ struct stmmac_ops {
void (*pmt) (unsigned long ioaddr, unsigned long mode);
/* Set/Get Unicast MAC addresses */
void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n);
+ unsigned int reg_n);
void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n);
+ unsigned int reg_n);
};
struct mac_link {
@@ -314,17 +215,19 @@ struct mii_regs {
unsigned int data; /* MII Data */
};
-struct hw_cap {
- unsigned int version; /* Core Version register (GMAC) */
- unsigned int pmt; /* Power-Down mode (GMAC) */
+struct mac_device_info {
+ struct stmmac_ops *mac;
+ struct stmmac_desc_ops *desc;
+ struct stmmac_dma_ops *dma;
+ unsigned int pmt; /* support Power-Down */
+ struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
- struct mii_regs mii;
};
-struct mac_device_info {
- struct hw_cap hw;
- struct stmmac_ops *ops;
-};
+struct mac_device_info *dwmac1000_setup(unsigned long addr);
+struct mac_device_info *dwmac100_setup(unsigned long addr);
-struct mac_device_info *gmac_setup(unsigned long addr);
-struct mac_device_info *mac100_setup(unsigned long addr);
+extern void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low);
+extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low);
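The common.h rework above splits the old monolithic stmmac_ops into three tables: MAC-core operations (stmmac_ops), descriptor handling (stmmac_desc_ops) and DMA control (stmmac_dma_ops), all reachable from mac_device_info. A hedged sketch of how a caller consumes the split interface; the local names (ioaddr, pbl, tx_phys, rx_phys, rx_ring, rx_size) are placeholders, not quotes from stmmac_main.c:

	struct mac_device_info *hw = dwmac1000_setup(ioaddr);

	hw->mac->core_init(ioaddr);			/* MAC core bring-up  */
	hw->dma->init(ioaddr, pbl, tx_phys, rx_phys);	/* DMA engine setup   */
	hw->desc->init_rx_desc(rx_ring, rx_size, 0);	/* RX descriptor ring */
	hw->dma->start_rx(ioaddr);
	hw->dma->start_tx(ioaddr);

The split keeps the dwmac100 and dwmac1000 cores sharing one DMA helper library (dwmac_lib.o in the new Makefile) while still letting each core provide its own MAC and descriptor callbacks.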
diff --git a/drivers/net/stmmac/descs.h b/drivers/net/stmmac/descs.h
index 6d2a0b2f5e57..63a03e264694 100644
--- a/drivers/net/stmmac/descs.h
+++ b/drivers/net/stmmac/descs.h
@@ -1,6 +1,6 @@
/*******************************************************************************
- Header File to describe the DMA descriptors
- Use enhanced descriptors in case of GMAC Cores.
+ Header File to describe the DMA descriptors.
+	Enhanced descriptors are used for DWMAC1000 Cores.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/stmmac/mac100.c b/drivers/net/stmmac/dwmac100.c
index 625171b6062b..ac48ed787040 100644
--- a/drivers/net/stmmac/mac100.c
+++ b/drivers/net/stmmac/dwmac100.c
@@ -26,23 +26,23 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <linux/netdevice.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include "common.h"
-#include "mac100.h"
+#include "dwmac100.h"
+#include "dwmac_dma.h"
-#undef MAC100_DEBUG
-/*#define MAC100_DEBUG*/
-#ifdef MAC100_DEBUG
+#undef DWMAC100_DEBUG
+/*#define DWMAC100_DEBUG*/
+#ifdef DWMAC100_DEBUG
#define DBG(fmt, args...) printk(fmt, ## args)
#else
#define DBG(fmt, args...) do { } while (0)
#endif
-static void mac100_core_init(unsigned long ioaddr)
+static void dwmac100_core_init(unsigned long ioaddr)
{
u32 value = readl(ioaddr + MAC_CONTROL);
@@ -54,43 +54,43 @@ static void mac100_core_init(unsigned long ioaddr)
return;
}
-static void mac100_dump_mac_regs(unsigned long ioaddr)
+static void dwmac100_dump_mac_regs(unsigned long ioaddr)
{
pr_info("\t----------------------------------------------\n"
- "\t MAC100 CSR (base addr = 0x%8x)\n"
- "\t----------------------------------------------\n",
- (unsigned int)ioaddr);
+ "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
+ "\t----------------------------------------------\n",
+ (unsigned int)ioaddr);
pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
- readl(ioaddr + MAC_CONTROL));
+ readl(ioaddr + MAC_CONTROL));
pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
- readl(ioaddr + MAC_ADDR_HIGH));
+ readl(ioaddr + MAC_ADDR_HIGH));
pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
- readl(ioaddr + MAC_ADDR_LOW));
+ readl(ioaddr + MAC_ADDR_LOW));
pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
- MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
+ MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
- MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
+ MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
pr_info("\tflow control (offset 0x%x): 0x%08x\n",
MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
- readl(ioaddr + MAC_VLAN1));
+ readl(ioaddr + MAC_VLAN1));
pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
- readl(ioaddr + MAC_VLAN2));
+ readl(ioaddr + MAC_VLAN2));
pr_info("\n\tMAC management counter registers\n");
pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
- MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
+ MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
- MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
+ MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
- MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
+ MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
- MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
+ MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
- MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
+ MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
return;
}
-static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
u32 dma_rx)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
@@ -117,7 +117,7 @@ static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
/* Store and Forward capability is not used at all..
* The transmit threshold can be programmed by
* setting the TTC bits in the DMA control register.*/
-static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode,
+static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
int rxmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -134,11 +134,11 @@ static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode,
return;
}
-static void mac100_dump_dma_regs(unsigned long ioaddr)
+static void dwmac100_dump_dma_regs(unsigned long ioaddr)
{
int i;
- DBG(KERN_DEBUG "MAC100 DMA CSR \n");
+ DBG(KERN_DEBUG "DWMAC 100 DMA CSR \n");
for (i = 0; i < 9; i++)
pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
(DMA_BUS_MODE + i * 4),
@@ -151,8 +151,9 @@ static void mac100_dump_dma_regs(unsigned long ioaddr)
}
/* DMA controller has two counters to track the number of
- the receive missed frames. */
-static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
+ * the receive missed frames. */
+static void dwmac100_dma_diagnostic_fr(void *data,
+ struct stmmac_extra_stats *x,
unsigned long ioaddr)
{
struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -181,7 +182,8 @@ static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
return;
}
-static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
+static int dwmac100_get_tx_frame_status(void *data,
+ struct stmmac_extra_stats *x,
struct dma_desc *p, unsigned long ioaddr)
{
int ret = 0;
@@ -217,7 +219,7 @@ static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static int mac100_get_tx_len(struct dma_desc *p)
+static int dwmac100_get_tx_len(struct dma_desc *p)
{
return p->des01.tx.buffer1_size;
}
@@ -226,14 +228,15 @@ static int mac100_get_tx_len(struct dma_desc *p)
* and, if required, updates the multicast statistics.
* In case of success, it returns csum_none becasue the device
* is not able to compute the csum in HW. */
-static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
+static int dwmac100_get_rx_frame_status(void *data,
+ struct stmmac_extra_stats *x,
struct dma_desc *p)
{
int ret = csum_none;
struct net_device_stats *stats = (struct net_device_stats *)data;
if (unlikely(p->des01.rx.last_descriptor == 0)) {
- pr_warning("mac100 Error: Oversized Ethernet "
+ pr_warning("dwmac100 Error: Oversized Ethernet "
"frame spanned multiple buffers\n");
stats->rx_length_errors++;
return discard_frame;
@@ -262,7 +265,7 @@ static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
ret = discard_frame;
if (unlikely(p->des01.rx.length_error)) {
- x->rx_lenght++;
+ x->rx_length++;
ret = discard_frame;
}
if (unlikely(p->des01.rx.mii_error)) {
@@ -276,24 +279,24 @@ static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static void mac100_irq_status(unsigned long ioaddr)
+static void dwmac100_irq_status(unsigned long ioaddr)
{
return;
}
-static void mac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
unsigned int reg_n)
{
stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
}
-static void mac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
unsigned int reg_n)
{
stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
}
-static void mac100_set_filter(struct net_device *dev)
+static void dwmac100_set_filter(struct net_device *dev)
{
unsigned long ioaddr = dev->base_addr;
u32 value = readl(ioaddr + MAC_CONTROL);
@@ -319,8 +322,8 @@ static void mac100_set_filter(struct net_device *dev)
/* Perfect filter mode for physical address and Hash
filter for multicast */
value |= MAC_CONTROL_HP;
- value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF
- | MAC_CONTROL_HO);
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
+ MAC_CONTROL_IF | MAC_CONTROL_HO);
memset(mc_filter, 0, sizeof(mc_filter));
for (i = 0, mclist = dev->mc_list;
@@ -347,7 +350,7 @@ static void mac100_set_filter(struct net_device *dev)
return;
}
-static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
unsigned int fc, unsigned int pause_time)
{
unsigned int flow = MAC_FLOW_CTRL_ENABLE;
@@ -359,13 +362,15 @@ static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
return;
}
-/* No PMT module supported in our SoC for the Ethernet Controller. */
-static void mac100_pmt(unsigned long ioaddr, unsigned long mode)
+/* No PMT module supported for this Ethernet Controller.
+ * Tested on ST platforms only.
+ */
+static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
{
return;
}
-static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+static void dwmac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
int disable_rx_ic)
{
int i;
@@ -381,7 +386,7 @@ static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
return;
}
-static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+static void dwmac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
{
int i;
for (i = 0; i < ring_size; i++) {
@@ -393,32 +398,32 @@ static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
return;
}
-static int mac100_get_tx_owner(struct dma_desc *p)
+static int dwmac100_get_tx_owner(struct dma_desc *p)
{
return p->des01.tx.own;
}
-static int mac100_get_rx_owner(struct dma_desc *p)
+static int dwmac100_get_rx_owner(struct dma_desc *p)
{
return p->des01.rx.own;
}
-static void mac100_set_tx_owner(struct dma_desc *p)
+static void dwmac100_set_tx_owner(struct dma_desc *p)
{
p->des01.tx.own = 1;
}
-static void mac100_set_rx_owner(struct dma_desc *p)
+static void dwmac100_set_rx_owner(struct dma_desc *p)
{
p->des01.rx.own = 1;
}
-static int mac100_get_tx_ls(struct dma_desc *p)
+static int dwmac100_get_tx_ls(struct dma_desc *p)
{
return p->des01.tx.last_segment;
}
-static void mac100_release_tx_desc(struct dma_desc *p)
+static void dwmac100_release_tx_desc(struct dma_desc *p)
{
int ter = p->des01.tx.end_ring;
@@ -444,74 +449,91 @@ static void mac100_release_tx_desc(struct dma_desc *p)
return;
}
-static void mac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+static void dwmac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
int csum_flag)
{
p->des01.tx.first_segment = is_fs;
p->des01.tx.buffer1_size = len;
}
-static void mac100_clear_tx_ic(struct dma_desc *p)
+static void dwmac100_clear_tx_ic(struct dma_desc *p)
{
p->des01.tx.interrupt = 0;
}
-static void mac100_close_tx_desc(struct dma_desc *p)
+static void dwmac100_close_tx_desc(struct dma_desc *p)
{
p->des01.tx.last_segment = 1;
p->des01.tx.interrupt = 1;
}
-static int mac100_get_rx_frame_len(struct dma_desc *p)
+static int dwmac100_get_rx_frame_len(struct dma_desc *p)
{
return p->des01.rx.frame_length;
}
-struct stmmac_ops mac100_driver = {
- .core_init = mac100_core_init,
- .dump_mac_regs = mac100_dump_mac_regs,
- .dma_init = mac100_dma_init,
- .dump_dma_regs = mac100_dump_dma_regs,
- .dma_mode = mac100_dma_operation_mode,
- .dma_diagnostic_fr = mac100_dma_diagnostic_fr,
- .tx_status = mac100_get_tx_frame_status,
- .rx_status = mac100_get_rx_frame_status,
- .get_tx_len = mac100_get_tx_len,
- .set_filter = mac100_set_filter,
- .flow_ctrl = mac100_flow_ctrl,
- .pmt = mac100_pmt,
- .init_rx_desc = mac100_init_rx_desc,
- .init_tx_desc = mac100_init_tx_desc,
- .get_tx_owner = mac100_get_tx_owner,
- .get_rx_owner = mac100_get_rx_owner,
- .release_tx_desc = mac100_release_tx_desc,
- .prepare_tx_desc = mac100_prepare_tx_desc,
- .clear_tx_ic = mac100_clear_tx_ic,
- .close_tx_desc = mac100_close_tx_desc,
- .get_tx_ls = mac100_get_tx_ls,
- .set_tx_owner = mac100_set_tx_owner,
- .set_rx_owner = mac100_set_rx_owner,
- .get_rx_frame_len = mac100_get_rx_frame_len,
- .host_irq_status = mac100_irq_status,
- .set_umac_addr = mac100_set_umac_addr,
- .get_umac_addr = mac100_get_umac_addr,
+struct stmmac_ops dwmac100_ops = {
+ .core_init = dwmac100_core_init,
+ .dump_regs = dwmac100_dump_mac_regs,
+ .host_irq_status = dwmac100_irq_status,
+ .set_filter = dwmac100_set_filter,
+ .flow_ctrl = dwmac100_flow_ctrl,
+ .pmt = dwmac100_pmt,
+ .set_umac_addr = dwmac100_set_umac_addr,
+ .get_umac_addr = dwmac100_get_umac_addr,
};
-struct mac_device_info *mac100_setup(unsigned long ioaddr)
+struct stmmac_dma_ops dwmac100_dma_ops = {
+ .init = dwmac100_dma_init,
+ .dump_regs = dwmac100_dump_dma_regs,
+ .dma_mode = dwmac100_dma_operation_mode,
+ .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
+};
+
+struct stmmac_desc_ops dwmac100_desc_ops = {
+ .tx_status = dwmac100_get_tx_frame_status,
+ .rx_status = dwmac100_get_rx_frame_status,
+ .get_tx_len = dwmac100_get_tx_len,
+ .init_rx_desc = dwmac100_init_rx_desc,
+ .init_tx_desc = dwmac100_init_tx_desc,
+ .get_tx_owner = dwmac100_get_tx_owner,
+ .get_rx_owner = dwmac100_get_rx_owner,
+ .release_tx_desc = dwmac100_release_tx_desc,
+ .prepare_tx_desc = dwmac100_prepare_tx_desc,
+ .clear_tx_ic = dwmac100_clear_tx_ic,
+ .close_tx_desc = dwmac100_close_tx_desc,
+ .get_tx_ls = dwmac100_get_tx_ls,
+ .set_tx_owner = dwmac100_set_tx_owner,
+ .set_rx_owner = dwmac100_set_rx_owner,
+ .get_rx_frame_len = dwmac100_get_rx_frame_len,
+};
+
+struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
{
struct mac_device_info *mac;
mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
- pr_info("\tMAC 10/100\n");
+ pr_info("\tDWMAC100\n");
+
+ mac->mac = &dwmac100_ops;
+ mac->desc = &dwmac100_desc_ops;
+ mac->dma = &dwmac100_dma_ops;
- mac->ops = &mac100_driver;
- mac->hw.pmt = PMT_NOT_SUPPORTED;
- mac->hw.link.port = MAC_CONTROL_PS;
- mac->hw.link.duplex = MAC_CONTROL_F;
- mac->hw.link.speed = 0;
- mac->hw.mii.addr = MAC_MII_ADDR;
- mac->hw.mii.data = MAC_MII_DATA;
+ mac->pmt = PMT_NOT_SUPPORTED;
+ mac->link.port = MAC_CONTROL_PS;
+ mac->link.duplex = MAC_CONTROL_F;
+ mac->link.speed = 0;
+ mac->mii.addr = MAC_MII_ADDR;
+ mac->mii.data = MAC_MII_DATA;
return mac;
}
diff --git a/drivers/net/stmmac/mac100.h b/drivers/net/stmmac/dwmac100.h
index 0f8f110d004a..0f8f110d004a 100644
--- a/drivers/net/stmmac/mac100.h
+++ b/drivers/net/stmmac/dwmac100.h
diff --git a/drivers/net/stmmac/gmac.h b/drivers/net/stmmac/dwmac1000.h
index 2e82d6c9a148..62dca0e384e7 100644
--- a/drivers/net/stmmac/gmac.h
+++ b/drivers/net/stmmac/dwmac1000.h
@@ -20,6 +20,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <linux/phy.h>
+#include "common.h"
+
#define GMAC_CONTROL 0x00000000 /* Configuration */
#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */
#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
@@ -32,7 +35,7 @@
#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
-enum gmac_irq_status {
+enum dwmac1000_irq_status {
time_stamp_irq = 0x0200,
mmc_rx_csum_offload_irq = 0x0080,
mmc_tx_irq = 0x0040,
@@ -202,3 +205,16 @@ enum rtc_control {
#define GMAC_MMC_RX_INTR 0x104
#define GMAC_MMC_TX_INTR 0x108
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
+
+#undef DWMAC1000_DEBUG
+/* #define DWMAC1000_DEBUG */
+#undef FRAME_FILTER_DEBUG
+/* #define FRAME_FILTER_DEBUG */
+#ifdef DWMAC1000_DEBUG
+#define DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define DBG(fmt, args...) do { } while (0)
+#endif
+
+extern struct stmmac_dma_ops dwmac1000_dma_ops;
+extern struct stmmac_desc_ops dwmac1000_desc_ops;
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
new file mode 100644
index 000000000000..d812e9cdb3db
--- /dev/null
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -0,0 +1,245 @@
+/*******************************************************************************
+ This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
+ developing this code.
+
+ This only implements the mac core functions for this chip.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/crc32.h>
+#include "dwmac1000.h"
+
+static void dwmac1000_core_init(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + GMAC_CONTROL);
+ value |= GMAC_CORE_INIT;
+ writel(value, ioaddr + GMAC_CONTROL);
+
+ /* STBus Bridge Configuration */
+ /*writel(0xc5608, ioaddr + 0x00007000);*/
+
+ /* Freeze MMC counters */
+ writel(0x8, ioaddr + GMAC_MMC_CTRL);
+ /* Mask GMAC interrupts */
+ writel(0x207, ioaddr + GMAC_INT_MASK);
+
+#ifdef STMMAC_VLAN_TAG_USED
+ /* Tag detection without filtering */
+ writel(0x0, ioaddr + GMAC_VLAN_TAG);
+#endif
+ return;
+}
+
+static void dwmac1000_dump_regs(unsigned long ioaddr)
+{
+ int i;
+ pr_info("\tDWMAC1000 regs (base addr = 0x%8x)\n", (unsigned int)ioaddr);
+
+ for (i = 0; i < 55; i++) {
+ int offset = i * 4;
+ pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
+ offset, readl(ioaddr + offset));
+ }
+ return;
+}
+
+static void dwmac1000_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac1000_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac1000_set_filter(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int value = 0;
+
+ DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
+ __func__, dev->mc_count, netdev_uc_count(dev));
+
+ if (dev->flags & IFF_PROMISC)
+ value = GMAC_FRAME_FILTER_PR;
+ else if ((dev->mc_count > HASH_TABLE_SIZE)
+ || (dev->flags & IFF_ALLMULTI)) {
+ value = GMAC_FRAME_FILTER_PM; /* pass all multi */
+ writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
+ writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
+ } else if (dev->mc_count > 0) {
+ int i;
+ u32 mc_filter[2];
+ struct dev_mc_list *mclist;
+
+ /* Hash filter for multicast */
+ value = GMAC_FRAME_FILTER_HMC;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ /* The upper 6 bits of the calculated CRC are used to
+			   index the contents of the hash table */
+ int bit_nr =
+ bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
+ writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
+ }
+
+ /* Handle multiple unicast addresses (perfect filtering)*/
+ if (netdev_uc_count(dev) > GMAC_MAX_UNICAST_ADDRESSES)
+		/* Switch to promiscuous mode if more than 16 addrs
+ are required */
+ value |= GMAC_FRAME_FILTER_PR;
+ else {
+ int reg = 1;
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ dwmac1000_set_umac_addr(ioaddr, ha->addr, reg);
+ reg++;
+ }
+ }
+
+#ifdef FRAME_FILTER_DEBUG
+ /* Enable Receive all mode (to debug filtering_fail errors) */
+ value |= GMAC_FRAME_FILTER_RA;
+#endif
+ writel(value, ioaddr + GMAC_FRAME_FILTER);
+
+ DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
+ "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
+ readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
+
+ return;
+}
+
+static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time)
+{
+ unsigned int flow = 0;
+
+ DBG(KERN_DEBUG "GMAC Flow-Control:\n");
+ if (fc & FLOW_RX) {
+ DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
+ flow |= GMAC_FLOW_CTRL_RFE;
+ }
+ if (fc & FLOW_TX) {
+ DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
+ flow |= GMAC_FLOW_CTRL_TFE;
+ }
+
+ if (duplex) {
+ DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
+ flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
+ }
+
+ writel(flow, ioaddr + GMAC_FLOW_CTRL);
+ return;
+}
+
+static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
+{
+ unsigned int pmt = 0;
+
+ if (mode == WAKE_MAGIC) {
+ DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
+ pmt |= power_down | magic_pkt_en;
+ } else if (mode == WAKE_UCAST) {
+ DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
+ pmt |= global_unicast;
+ }
+
+ writel(pmt, ioaddr + GMAC_PMT);
+ return;
+}
+
+
+static void dwmac1000_irq_status(unsigned long ioaddr)
+{
+ u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+
+ /* Not used events (e.g. MMC interrupts) are not handled. */
+ if ((intr_status & mmc_tx_irq))
+ DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_TX_INTR));
+ if (unlikely(intr_status & mmc_rx_irq))
+ DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_RX_INTR));
+ if (unlikely(intr_status & mmc_rx_csum_offload_irq))
+ DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
+ if (unlikely(intr_status & pmt_irq)) {
+ DBG(KERN_DEBUG "GMAC: received Magic frame\n");
+ /* clear the PMT bits 5 and 6 by reading the PMT
+ * status register. */
+ readl(ioaddr + GMAC_PMT);
+ }
+
+ return;
+}
+
+struct stmmac_ops dwmac1000_ops = {
+ .core_init = dwmac1000_core_init,
+ .dump_regs = dwmac1000_dump_regs,
+ .host_irq_status = dwmac1000_irq_status,
+ .set_filter = dwmac1000_set_filter,
+ .flow_ctrl = dwmac1000_flow_ctrl,
+ .pmt = dwmac1000_pmt,
+ .set_umac_addr = dwmac1000_set_umac_addr,
+ .get_umac_addr = dwmac1000_get_umac_addr,
+};
+
+struct mac_device_info *dwmac1000_setup(unsigned long ioaddr)
+{
+ struct mac_device_info *mac;
+ u32 uid = readl(ioaddr + GMAC_VERSION);
+
+ pr_info("\tDWMAC1000 - user ID: 0x%x, Synopsys ID: 0x%x\n",
+ ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
+
+ mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+
+ mac->mac = &dwmac1000_ops;
+ mac->desc = &dwmac1000_desc_ops;
+ mac->dma = &dwmac1000_dma_ops;
+
+ mac->pmt = PMT_SUPPORTED;
+ mac->link.port = GMAC_CONTROL_PS;
+ mac->link.duplex = GMAC_CONTROL_DM;
+ mac->link.speed = GMAC_CONTROL_FES;
+ mac->mii.addr = GMAC_MII_ADDR;
+ mac->mii.data = GMAC_MII_DATA;
+
+ return mac;
+}
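The multicast path in dwmac1000_set_filter() above hashes each address with a bit-reversed CRC-32 and uses the top six bits to pick one of 64 filter bits. A hedged sketch of that computation for a single illustrative address, using the kernel helpers crc32_le() and bitrev32() from <linux/crc32.h> and <linux/bitrev.h>:

	u8 mc_addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };	/* example */
	u32 crc = bitrev32(~crc32_le(~0, mc_addr, 6));
	int bit_nr = crc >> 26;			/* upper 6 bits -> 0..63 */
	u32 mc_filter[2] = { 0, 0 };

	/* bit 5 of bit_nr picks the HIGH or LOW hash register,
	 * bits 0..4 pick the bit inside that register */
	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);

mc_filter[0] is then written to GMAC_HASH_LOW and mc_filter[1] to GMAC_HASH_HIGH, exactly as the new core file does.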
diff --git a/drivers/net/stmmac/gmac.c b/drivers/net/stmmac/dwmac1000_dma.c
index 52586ee68953..39d436a2da68 100644
--- a/drivers/net/stmmac/gmac.c
+++ b/drivers/net/stmmac/dwmac1000_dma.c
@@ -3,6 +3,8 @@
DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
developing this code.
+ This contains the functions to handle the dma and descriptors.
+
Copyright (C) 2007-2009 STMicroelectronics Ltd
This program is free software; you can redistribute it and/or modify it
@@ -24,41 +26,11 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <linux/netdevice.h>
-#include <linux/crc32.h>
-#include <linux/mii.h>
-#include <linux/phy.h>
-
-#include "stmmac.h"
-#include "gmac.h"
-
-#undef GMAC_DEBUG
-/*#define GMAC_DEBUG*/
-#undef FRAME_FILTER_DEBUG
-/*#define FRAME_FILTER_DEBUG*/
-#ifdef GMAC_DEBUG
-#define DBG(fmt, args...) printk(fmt, ## args)
-#else
-#define DBG(fmt, args...) do { } while (0)
-#endif
+#include "dwmac1000.h"
+#include "dwmac_dma.h"
-static void gmac_dump_regs(unsigned long ioaddr)
-{
- int i;
- pr_info("\t----------------------------------------------\n"
- "\t GMAC registers (base addr = 0x%8x)\n"
- "\t----------------------------------------------\n",
- (unsigned int)ioaddr);
-
- for (i = 0; i < 55; i++) {
- int offset = i * 4;
- pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
- offset, readl(ioaddr + offset));
- }
- return;
-}
-
-static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
+static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+ u32 dma_rx)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
/* DMA SW reset */
@@ -87,7 +59,7 @@ static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
}
/* Transmit FIFO flush operation */
-static void gmac_flush_tx_fifo(unsigned long ioaddr)
+static void dwmac1000_flush_tx_fifo(unsigned long ioaddr)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
@@ -95,7 +67,7 @@ static void gmac_flush_tx_fifo(unsigned long ioaddr)
do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
}
-static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
+static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
int rxmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -148,13 +120,13 @@ static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
}
/* Not yet implemented --- no RMON module */
-static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
- unsigned long ioaddr)
+static void dwmac1000_dma_diagnostic_fr(void *data,
+ struct stmmac_extra_stats *x, unsigned long ioaddr)
{
return;
}
-static void gmac_dump_dma_regs(unsigned long ioaddr)
+static void dwmac1000_dump_dma_regs(unsigned long ioaddr)
{
int i;
pr_info(" DMA registers\n");
@@ -169,8 +141,9 @@ static void gmac_dump_dma_regs(unsigned long ioaddr)
return;
}
-static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p, unsigned long ioaddr)
+static int dwmac1000_get_tx_frame_status(void *data,
+ struct stmmac_extra_stats *x,
+ struct dma_desc *p, unsigned long ioaddr)
{
int ret = 0;
struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -185,7 +158,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(p->des01.etx.frame_flushed)) {
DBG(KERN_ERR "\tframe_flushed error\n");
x->tx_frame_flushed++;
- gmac_flush_tx_fifo(ioaddr);
+ dwmac1000_flush_tx_fifo(ioaddr);
}
if (unlikely(p->des01.etx.loss_carrier)) {
@@ -213,7 +186,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(p->des01.etx.underflow_error)) {
DBG(KERN_ERR "\tunderflow error\n");
- gmac_flush_tx_fifo(ioaddr);
+ dwmac1000_flush_tx_fifo(ioaddr);
x->tx_underflow++;
}
@@ -225,7 +198,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(p->des01.etx.payload_error)) {
DBG(KERN_ERR "\tAddr/Payload csum error\n");
x->tx_payload_error++;
- gmac_flush_tx_fifo(ioaddr);
+ dwmac1000_flush_tx_fifo(ioaddr);
}
ret = -1;
@@ -245,19 +218,19 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static int gmac_get_tx_len(struct dma_desc *p)
+static int dwmac1000_get_tx_len(struct dma_desc *p)
{
return p->des01.etx.buffer1_size;
}
-static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
+static int dwmac1000_coe_rdes0(int ipc_err, int type, int payload_err)
{
int ret = good_frame;
u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
/* bits 5 7 0 | Frame status
* ----------------------------------------------------------
- * 0 0 0 | IEEE 802.3 Type frame (lenght < 1536 octects)
+	 * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
* 1 0 0 | IPv4/6 No CSUM errorS.
* 1 0 1 | IPv4/6 CSUM PAYLOAD error
* 1 1 0 | IPv4/6 CSUM IP HR error
@@ -293,8 +266,8 @@ static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
return ret;
}
-static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p)
+static int dwmac1000_get_rx_frame_status(void *data,
+ struct stmmac_extra_stats *x, struct dma_desc *p)
{
int ret = good_frame;
struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -339,7 +312,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
* It doesn't match with the information reported into the databook.
* At any rate, we need to understand if the CSUM hw computation is ok
* and report this info to the upper layers. */
- ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error,
+ ret = dwmac1000_coe_rdes0(p->des01.erx.ipc_csum_error,
p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
if (unlikely(p->des01.erx.dribbling)) {
@@ -358,7 +331,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
}
if (unlikely(p->des01.erx.length_error)) {
DBG(KERN_ERR "GMAC RX: length_error error\n");
- x->rx_lenght++;
+ x->rx_length++;
ret = discard_frame;
}
#ifdef STMMAC_VLAN_TAG_USED
@@ -370,181 +343,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static void gmac_irq_status(unsigned long ioaddr)
-{
- u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
-
- /* Not used events (e.g. MMC interrupts) are not handled. */
- if ((intr_status & mmc_tx_irq))
- DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
- readl(ioaddr + GMAC_MMC_TX_INTR));
- if (unlikely(intr_status & mmc_rx_irq))
- DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
- readl(ioaddr + GMAC_MMC_RX_INTR));
- if (unlikely(intr_status & mmc_rx_csum_offload_irq))
- DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
- readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
- if (unlikely(intr_status & pmt_irq)) {
- DBG(KERN_DEBUG "GMAC: received Magic frame\n");
- /* clear the PMT bits 5 and 6 by reading the PMT
- * status register. */
- readl(ioaddr + GMAC_PMT);
- }
-
- return;
-}
-
-static void gmac_core_init(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + GMAC_CONTROL);
- value |= GMAC_CORE_INIT;
- writel(value, ioaddr + GMAC_CONTROL);
-
- /* STBus Bridge Configuration */
- /*writel(0xc5608, ioaddr + 0x00007000);*/
-
- /* Freeze MMC counters */
- writel(0x8, ioaddr + GMAC_MMC_CTRL);
- /* Mask GMAC interrupts */
- writel(0x207, ioaddr + GMAC_INT_MASK);
-
-#ifdef STMMAC_VLAN_TAG_USED
- /* Tag detection without filtering */
- writel(0x0, ioaddr + GMAC_VLAN_TAG);
-#endif
- return;
-}
-
-static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n)
-{
- stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
- GMAC_ADDR_LOW(reg_n));
-}
-
-static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n)
-{
- stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
- GMAC_ADDR_LOW(reg_n));
-}
-
-static void gmac_set_filter(struct net_device *dev)
-{
- unsigned long ioaddr = dev->base_addr;
- unsigned int value = 0;
-
- DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
- __func__, dev->mc_count, dev->uc_count);
-
- if (dev->flags & IFF_PROMISC)
- value = GMAC_FRAME_FILTER_PR;
- else if ((dev->mc_count > HASH_TABLE_SIZE)
- || (dev->flags & IFF_ALLMULTI)) {
- value = GMAC_FRAME_FILTER_PM; /* pass all multi */
- writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
- writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
- } else if (dev->mc_count > 0) {
- int i;
- u32 mc_filter[2];
- struct dev_mc_list *mclist;
-
- /* Hash filter for multicast */
- value = GMAC_FRAME_FILTER_HMC;
-
- memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list;
- mclist && i < dev->mc_count; i++, mclist = mclist->next) {
- /* The upper 6 bits of the calculated CRC are used to
- index the contens of the hash table */
- int bit_nr =
- bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
- /* The most significant bit determines the register to
- * use (H/L) while the other 5 bits determine the bit
- * within the register. */
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- }
- writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
- writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
- }
-
- /* Handle multiple unicast addresses (perfect filtering)*/
- if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES)
- /* Switch to promiscuous mode is more than 16 addrs
- are required */
- value |= GMAC_FRAME_FILTER_PR;
- else {
- int i;
- struct dev_addr_list *uc_ptr = dev->uc_list;
-
- for (i = 0; i < dev->uc_count; i++) {
- gmac_set_umac_addr(ioaddr, uc_ptr->da_addr,
- i + 1);
-
- DBG(KERN_INFO "\t%d "
- "- Unicast addr %02x:%02x:%02x:%02x:%02x:"
- "%02x\n", i + 1,
- uc_ptr->da_addr[0], uc_ptr->da_addr[1],
- uc_ptr->da_addr[2], uc_ptr->da_addr[3],
- uc_ptr->da_addr[4], uc_ptr->da_addr[5]);
- uc_ptr = uc_ptr->next;
- }
- }
-
-#ifdef FRAME_FILTER_DEBUG
- /* Enable Receive all mode (to debug filtering_fail errors) */
- value |= GMAC_FRAME_FILTER_RA;
-#endif
- writel(value, ioaddr + GMAC_FRAME_FILTER);
-
- DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
- "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
- readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
-
- return;
-}
-
-static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
- unsigned int fc, unsigned int pause_time)
-{
- unsigned int flow = 0;
-
- DBG(KERN_DEBUG "GMAC Flow-Control:\n");
- if (fc & FLOW_RX) {
- DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
- flow |= GMAC_FLOW_CTRL_RFE;
- }
- if (fc & FLOW_TX) {
- DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
- flow |= GMAC_FLOW_CTRL_TFE;
- }
-
- if (duplex) {
- DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
- flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
- }
-
- writel(flow, ioaddr + GMAC_FLOW_CTRL);
- return;
-}
-
-static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
-{
- unsigned int pmt = 0;
-
- if (mode == WAKE_MAGIC) {
- DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
- pmt |= power_down | magic_pkt_en;
- } else if (mode == WAKE_UCAST) {
- DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
- pmt |= global_unicast;
- }
-
- writel(pmt, ioaddr + GMAC_PMT);
- return;
-}
-
-static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+static void dwmac1000_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
int disable_rx_ic)
{
int i;
@@ -562,7 +361,7 @@ static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
return;
}
-static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+static void dwmac1000_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
{
int i;
@@ -576,32 +375,32 @@ static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
return;
}
-static int gmac_get_tx_owner(struct dma_desc *p)
+static int dwmac1000_get_tx_owner(struct dma_desc *p)
{
return p->des01.etx.own;
}
-static int gmac_get_rx_owner(struct dma_desc *p)
+static int dwmac1000_get_rx_owner(struct dma_desc *p)
{
return p->des01.erx.own;
}
-static void gmac_set_tx_owner(struct dma_desc *p)
+static void dwmac1000_set_tx_owner(struct dma_desc *p)
{
p->des01.etx.own = 1;
}
-static void gmac_set_rx_owner(struct dma_desc *p)
+static void dwmac1000_set_rx_owner(struct dma_desc *p)
{
p->des01.erx.own = 1;
}
-static int gmac_get_tx_ls(struct dma_desc *p)
+static int dwmac1000_get_tx_ls(struct dma_desc *p)
{
return p->des01.etx.last_segment;
}
-static void gmac_release_tx_desc(struct dma_desc *p)
+static void dwmac1000_release_tx_desc(struct dma_desc *p)
{
int ter = p->des01.etx.end_ring;
@@ -611,7 +410,7 @@ static void gmac_release_tx_desc(struct dma_desc *p)
return;
}
-static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+static void dwmac1000_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
int csum_flag)
{
p->des01.etx.first_segment = is_fs;
@@ -625,69 +424,51 @@ static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
p->des01.etx.checksum_insertion = cic_full;
}
-static void gmac_clear_tx_ic(struct dma_desc *p)
+static void dwmac1000_clear_tx_ic(struct dma_desc *p)
{
p->des01.etx.interrupt = 0;
}
-static void gmac_close_tx_desc(struct dma_desc *p)
+static void dwmac1000_close_tx_desc(struct dma_desc *p)
{
p->des01.etx.last_segment = 1;
p->des01.etx.interrupt = 1;
}
-static int gmac_get_rx_frame_len(struct dma_desc *p)
+static int dwmac1000_get_rx_frame_len(struct dma_desc *p)
{
return p->des01.erx.frame_length;
}
-struct stmmac_ops gmac_driver = {
- .core_init = gmac_core_init,
- .dump_mac_regs = gmac_dump_regs,
- .dma_init = gmac_dma_init,
- .dump_dma_regs = gmac_dump_dma_regs,
- .dma_mode = gmac_dma_operation_mode,
- .dma_diagnostic_fr = gmac_dma_diagnostic_fr,
- .tx_status = gmac_get_tx_frame_status,
- .rx_status = gmac_get_rx_frame_status,
- .get_tx_len = gmac_get_tx_len,
- .set_filter = gmac_set_filter,
- .flow_ctrl = gmac_flow_ctrl,
- .pmt = gmac_pmt,
- .init_rx_desc = gmac_init_rx_desc,
- .init_tx_desc = gmac_init_tx_desc,
- .get_tx_owner = gmac_get_tx_owner,
- .get_rx_owner = gmac_get_rx_owner,
- .release_tx_desc = gmac_release_tx_desc,
- .prepare_tx_desc = gmac_prepare_tx_desc,
- .clear_tx_ic = gmac_clear_tx_ic,
- .close_tx_desc = gmac_close_tx_desc,
- .get_tx_ls = gmac_get_tx_ls,
- .set_tx_owner = gmac_set_tx_owner,
- .set_rx_owner = gmac_set_rx_owner,
- .get_rx_frame_len = gmac_get_rx_frame_len,
- .host_irq_status = gmac_irq_status,
- .set_umac_addr = gmac_set_umac_addr,
- .get_umac_addr = gmac_get_umac_addr,
+struct stmmac_dma_ops dwmac1000_dma_ops = {
+ .init = dwmac1000_dma_init,
+ .dump_regs = dwmac1000_dump_dma_regs,
+ .dma_mode = dwmac1000_dma_operation_mode,
+ .dma_diagnostic_fr = dwmac1000_dma_diagnostic_fr,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
};
-struct mac_device_info *gmac_setup(unsigned long ioaddr)
-{
- struct mac_device_info *mac;
- u32 uid = readl(ioaddr + GMAC_VERSION);
-
- pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
- ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
-
- mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
-
- mac->ops = &gmac_driver;
- mac->hw.pmt = PMT_SUPPORTED;
- mac->hw.link.port = GMAC_CONTROL_PS;
- mac->hw.link.duplex = GMAC_CONTROL_DM;
- mac->hw.link.speed = GMAC_CONTROL_FES;
- mac->hw.mii.addr = GMAC_MII_ADDR;
- mac->hw.mii.data = GMAC_MII_DATA;
-
- return mac;
-}
+struct stmmac_desc_ops dwmac1000_desc_ops = {
+ .tx_status = dwmac1000_get_tx_frame_status,
+ .rx_status = dwmac1000_get_rx_frame_status,
+ .get_tx_len = dwmac1000_get_tx_len,
+ .init_rx_desc = dwmac1000_init_rx_desc,
+ .init_tx_desc = dwmac1000_init_tx_desc,
+ .get_tx_owner = dwmac1000_get_tx_owner,
+ .get_rx_owner = dwmac1000_get_rx_owner,
+ .release_tx_desc = dwmac1000_release_tx_desc,
+ .prepare_tx_desc = dwmac1000_prepare_tx_desc,
+ .clear_tx_ic = dwmac1000_clear_tx_ic,
+ .close_tx_desc = dwmac1000_close_tx_desc,
+ .get_tx_ls = dwmac1000_get_tx_ls,
+ .set_tx_owner = dwmac1000_set_tx_owner,
+ .set_rx_owner = dwmac1000_set_rx_owner,
+ .get_rx_frame_len = dwmac1000_get_rx_frame_len,
+};
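Note: with this rework the old all-in-one stmmac_ops table is split into per-block tables: MAC core ops, DMA engine ops (the shared dwmac_* helpers declared in dwmac_dma.h just below) and descriptor ops. A minimal sketch of how the core driver dispatches through them after this patch; the exact layout of struct mac_device_info is assumed here, only the ->mac/->dma/->desc accessors are taken from the stmmac_main.c hunks further down:

	/* sketch, not part of the patch: the holder the core driver sees */
	struct mac_device_info {
		struct stmmac_ops      *mac;   /* core init, filter, flow ctrl, PMT */
		struct stmmac_dma_ops  *dma;   /* CSR programming, start/stop, IRQ  */
		struct stmmac_desc_ops *desc;  /* ring descriptor handling          */
		/* link/mii/pmt fields as used elsewhere in this patch */
	};

	/* sketch: cleaning one tx descriptor through the new indirections */
	static void example_clean_one_tx(struct stmmac_priv *priv, struct dma_desc *p)
	{
		if (priv->hw->desc->get_tx_owner(p))
			return;				/* still owned by the DMA */
		if (priv->hw->desc->get_tx_ls(p) &&	/* last segment: read back status */
		    priv->hw->desc->tx_status(&priv->dev->stats,
					      &priv->xstats, p,
					      priv->dev->base_addr) == 0)
			priv->dev->stats.tx_packets++;
		priv->hw->desc->release_tx_desc(p);
	}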
diff --git a/drivers/net/stmmac/dwmac_dma.h b/drivers/net/stmmac/dwmac_dma.h
new file mode 100644
index 000000000000..de848d9f6060
--- /dev/null
+++ b/drivers/net/stmmac/dwmac_dma.h
@@ -0,0 +1,107 @@
+/*******************************************************************************
+ DWMAC DMA Header file.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+/* DMA CRS Control and Status Register Mapping */
+#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
+#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
+#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
+#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
+#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
+#define DMA_STATUS 0x00001014 /* Status Register */
+#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
+#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
+#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
+#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
+#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
+
+/* DMA Control register defines */
+#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
+#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
+
+/* DMA Normal interrupt */
+#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
+#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
+#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
+#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
+
+#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+ DMA_INTR_ENA_TIE)
+
+/* DMA Abnormal interrupt */
+#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
+#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
+#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
+#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
+#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
+#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
+#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
+#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
+#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
+
+#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+ DMA_INTR_ENA_UNE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+
+/* DMA Status register defines */
+#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
+#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
+#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
+#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
+#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
+#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
+#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
+#define DMA_STATUS_TS_SHIFT 20
+#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
+#define DMA_STATUS_RS_SHIFT 17
+#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
+#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
+#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
+#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
+#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
+#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
+#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
+#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+
+extern void dwmac_enable_dma_transmission(unsigned long ioaddr);
+extern void dwmac_enable_dma_irq(unsigned long ioaddr);
+extern void dwmac_disable_dma_irq(unsigned long ioaddr);
+extern void dwmac_dma_start_tx(unsigned long ioaddr);
+extern void dwmac_dma_stop_tx(unsigned long ioaddr);
+extern void dwmac_dma_start_rx(unsigned long ioaddr);
+extern void dwmac_dma_stop_rx(unsigned long ioaddr);
+extern int dwmac_dma_interrupt(unsigned long ioaddr,
+ struct stmmac_extra_stats *x);
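Note: combining the values above, the default mask written to DMA_INTR_ENA works out to

	DMA_INTR_DEFAULT_MASK = DMA_INTR_NORMAL | DMA_INTR_ABNORMAL
	                      = (0x00010000 | 0x00000040 | 0x00000001)
	                      | (0x00008000 | 0x00002000 | 0x00000020)
	                      = 0x0001a061

that is, the normal summary plus RX/TX completion, and the abnormal summary plus fatal bus error and TX underflow, which matches what dwmac_enable_dma_irq() in dwmac_lib.c below arms.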
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
new file mode 100644
index 000000000000..d4adb1eaa447
--- /dev/null
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -0,0 +1,263 @@
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/io.h>
+#include "common.h"
+#include "dwmac_dma.h"
+
+#undef DWMAC_DMA_DEBUG
+#ifdef DWMAC_DMA_DEBUG
+#define DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define DBG(fmt, args...) do { } while (0)
+#endif
+
+/* CSR1 enables the transmit DMA to check for new descriptor */
+void dwmac_enable_dma_transmission(unsigned long ioaddr)
+{
+ writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
+}
+
+void dwmac_enable_dma_irq(unsigned long ioaddr)
+{
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+}
+
+void dwmac_disable_dma_irq(unsigned long ioaddr)
+{
+ writel(0, ioaddr + DMA_INTR_ENA);
+}
+
+void dwmac_dma_start_tx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value |= DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CONTROL);
+ return;
+}
+
+void dwmac_dma_stop_tx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value &= ~DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CONTROL);
+ return;
+}
+
+void dwmac_dma_start_rx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value |= DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+
+ return;
+}
+
+void dwmac_dma_stop_rx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value &= ~DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+
+ return;
+}
+
+#ifdef DWMAC_DMA_DEBUG
+static void show_tx_process_state(unsigned int status)
+{
+ unsigned int state;
+ state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- TX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- TX (Running):Fetching the Tx desc\n");
+ break;
+ case 2:
+ pr_info("- TX (Running): Waiting for end of tx\n");
+ break;
+ case 3:
+ pr_info("- TX (Running): Reading the data "
+ "and queuing the data into the Tx buf\n");
+ break;
+ case 6:
+ pr_info("- TX (Suspended): Tx Buff Underflow "
+ "or an unavailable Transmit descriptor\n");
+ break;
+ case 7:
+ pr_info("- TX (Running): Closing Tx descriptor\n");
+ break;
+ default:
+ break;
+ }
+ return;
+}
+
+static void show_rx_process_state(unsigned int status)
+{
+ unsigned int state;
+ state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- RX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- RX (Running): Fetching the Rx desc\n");
+ break;
+ case 2:
+ pr_info("- RX (Running):Checking for end of pkt\n");
+ break;
+ case 3:
+ pr_info("- RX (Running): Waiting for Rx pkt\n");
+ break;
+ case 4:
+ pr_info("- RX (Suspended): Unavailable Rx buf\n");
+ break;
+ case 5:
+ pr_info("- RX (Running): Closing Rx descriptor\n");
+ break;
+ case 6:
+ pr_info("- RX(Running): Flushing the current frame"
+ " from the Rx buf\n");
+ break;
+ case 7:
+ pr_info("- RX (Running): Queuing the Rx frame"
+ " from the Rx buf into memory\n");
+ break;
+ default:
+ break;
+ }
+ return;
+}
+#endif
+
+int dwmac_dma_interrupt(unsigned long ioaddr,
+ struct stmmac_extra_stats *x)
+{
+ int ret = 0;
+ /* read the status register (CSR5) */
+ u32 intr_status = readl(ioaddr + DMA_STATUS);
+
+ DBG(INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
+#ifdef DWMAC_DMA_DEBUG
+ /* It displays the DMA process states (CSR5 register) */
+ show_tx_process_state(intr_status);
+ show_rx_process_state(intr_status);
+#endif
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & DMA_STATUS_AIS)) {
+ DBG(INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
+ if (unlikely(intr_status & DMA_STATUS_UNF)) {
+ DBG(INFO, "transmit underflow\n");
+ ret = tx_hard_error_bump_tc;
+ x->tx_undeflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TJT)) {
+ DBG(INFO, "transmit jabber\n");
+ x->tx_jabber_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_OVF)) {
+ DBG(INFO, "recv overflow\n");
+ x->rx_overflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RU)) {
+ DBG(INFO, "receive buffer unavailable\n");
+ x->rx_buf_unav_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RPS)) {
+ DBG(INFO, "receive process stopped\n");
+ x->rx_process_stopped_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RWT)) {
+ DBG(INFO, "receive watchdog\n");
+ x->rx_watchdog_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_ETI)) {
+ DBG(INFO, "transmit early interrupt\n");
+ x->tx_early_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TPS)) {
+ DBG(INFO, "transmit process stopped\n");
+ x->tx_process_stopped_irq++;
+ ret = tx_hard_error;
+ }
+ if (unlikely(intr_status & DMA_STATUS_FBI)) {
+ DBG(INFO, "fatal bus error\n");
+ x->fatal_bus_error_irq++;
+ ret = tx_hard_error;
+ }
+ }
+ /* TX/RX NORMAL interrupts */
+ if (intr_status & DMA_STATUS_NIS) {
+ x->normal_irq_n++;
+ if (likely((intr_status & DMA_STATUS_RI) ||
+ (intr_status & (DMA_STATUS_TI))))
+ ret = handle_tx_rx;
+ }
+ /* Optional hardware blocks, interrupts should be disabled */
+ if (unlikely(intr_status &
+ (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
+ pr_info("%s: unexpected status %08x\n", __func__, intr_status);
+ /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
+ writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
+
+ DBG(INFO, "\n\n");
+ return ret;
+}
+
+
+void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low)
+{
+ unsigned long data;
+
+ data = (addr[5] << 8) | addr[4];
+ writel(data, ioaddr + high);
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + low);
+
+ return;
+}
+
+void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low)
+{
+ unsigned int hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + high);
+ lo_addr = readl(ioaddr + low);
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+
+ return;
+}
+
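Note on the two address helpers above: the station address is stored little-end first, split across a high and a low register. A worked example (not from the patch): for 00:11:22:33:44:55, with addr[0] = 0x00 ... addr[5] = 0x55,

	high word: (0x55 << 8) | 0x44                                = 0x00005544
	low  word: (0x33 << 24) | (0x22 << 16) | (0x11 << 8) | 0x00  = 0x33221100

stmmac_get_mac_addr() performs the inverse unpacking when the address is read back, byte 0 coming from the low word upwards.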
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index 6d2eae3040e5..ba35e6943cf4 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,7 +20,8 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#define DRV_MODULE_VERSION "Oct_09"
+#define DRV_MODULE_VERSION "Jan_2010"
+#include <linux/stmmac.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define STMMAC_VLAN_TAG_USED
@@ -57,7 +58,7 @@ struct stmmac_priv {
int rx_csum;
unsigned int dma_buf_sz;
struct device *device;
- struct mac_device_info *mac_type;
+ struct mac_device_info *hw;
struct stmmac_extra_stats xstats;
struct napi_struct napi;
@@ -69,6 +70,7 @@ struct stmmac_priv {
int phy_mask;
int (*phy_reset) (void *priv);
void (*fix_mac_speed) (void *priv, unsigned int speed);
+ void (*bus_setup)(unsigned long ioaddr);
void *bsp_priv;
int phy_irq;
@@ -93,6 +95,28 @@ struct stmmac_priv {
#endif
};
+#ifdef CONFIG_STM_DRIVERS
+#include <linux/stm/pad.h>
+static inline int stmmac_claim_resource(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct plat_stmmacenet_data *plat_dat = pdev->dev.platform_data;
+
+ /* Pad routing setup */
+ if (IS_ERR(devm_stm_pad_claim(&pdev->dev, plat_dat->pad_config,
+ dev_name(&pdev->dev)))) {
+ printk(KERN_ERR "%s: Failed to request pads!\n", __func__);
+ ret = -ENODEV;
+ }
+ return ret;
+}
+#else
+static inline int stmmac_claim_resource(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
extern int stmmac_mdio_unregister(struct net_device *ndev);
extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index 694ebe6a0758..c021eaa3ca69 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -28,6 +28,7 @@
#include <linux/phy.h>
#include "stmmac.h"
+#include "dwmac_dma.h"
#define REG_SPACE_SIZE 0x1054
#define MAC100_ETHTOOL_NAME "st_mac100"
@@ -61,7 +62,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(rx_toolong),
STMMAC_STAT(rx_collision),
STMMAC_STAT(rx_crc),
- STMMAC_STAT(rx_lenght),
+ STMMAC_STAT(rx_length),
STMMAC_STAT(rx_mii),
STMMAC_STAT(rx_multicast),
STMMAC_STAT(rx_gmac_overflow),
@@ -268,8 +269,8 @@ stmmac_set_pauseparam(struct net_device *netdev,
}
} else {
unsigned long ioaddr = netdev->base_addr;
- priv->mac_type->ops->flow_ctrl(ioaddr, phy->duplex,
- priv->flow_ctrl, priv->pause);
+ priv->hw->mac->flow_ctrl(ioaddr, phy->duplex,
+ priv->flow_ctrl, priv->pause);
}
spin_unlock(&priv->lock);
return ret;
@@ -283,8 +284,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
int i;
/* Update HW stats if supported */
- priv->mac_type->ops->dma_diagnostic_fr(&dev->stats, &priv->xstats,
- ioaddr);
+ priv->hw->dma->dma_diagnostic_fr(&dev->stats, (void *) &priv->xstats,
+ ioaddr);
for (i = 0; i < STMMAC_STATS_LEN; i++) {
char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 508fba8fa07f..a6733612d64a 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -32,7 +32,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
-#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/ip.h>
@@ -45,7 +44,6 @@
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
-#include <linux/stm/soc.h>
#include "stmmac.h"
#define STMMAC_RESOURCE_NAME "stmmaceth"
@@ -226,41 +224,38 @@ static void stmmac_adjust_link(struct net_device *dev)
if (phydev->duplex != priv->oldduplex) {
new_state = 1;
if (!(phydev->duplex))
- ctrl &= ~priv->mac_type->hw.link.duplex;
+ ctrl &= ~priv->hw->link.duplex;
else
- ctrl |= priv->mac_type->hw.link.duplex;
+ ctrl |= priv->hw->link.duplex;
priv->oldduplex = phydev->duplex;
}
/* Flow Control operation */
if (phydev->pause)
- priv->mac_type->ops->flow_ctrl(ioaddr, phydev->duplex,
- fc, pause_time);
+ priv->hw->mac->flow_ctrl(ioaddr, phydev->duplex,
+ fc, pause_time);
if (phydev->speed != priv->speed) {
new_state = 1;
switch (phydev->speed) {
case 1000:
if (likely(priv->is_gmac))
- ctrl &= ~priv->mac_type->hw.link.port;
+ ctrl &= ~priv->hw->link.port;
break;
case 100:
case 10:
if (priv->is_gmac) {
- ctrl |= priv->mac_type->hw.link.port;
+ ctrl |= priv->hw->link.port;
if (phydev->speed == SPEED_100) {
- ctrl |=
- priv->mac_type->hw.link.
- speed;
+ ctrl |= priv->hw->link.speed;
} else {
- ctrl &=
- ~(priv->mac_type->hw.
- link.speed);
+ ctrl &= ~(priv->hw->link.speed);
}
} else {
- ctrl &= ~priv->mac_type->hw.link.port;
+ ctrl &= ~priv->hw->link.port;
}
- priv->fix_mac_speed(priv->bsp_priv,
- phydev->speed);
+ if (likely(priv->fix_mac_speed))
+ priv->fix_mac_speed(priv->bsp_priv,
+ phydev->speed);
break;
default:
if (netif_msg_link(priv))
@@ -305,8 +300,8 @@ static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phydev;
- char phy_id[BUS_ID_SIZE]; /* PHY to connect */
- char bus_id[BUS_ID_SIZE];
+ char phy_id[MII_BUS_ID_SIZE + 3];
+ char bus_id[MII_BUS_ID_SIZE];
priv->oldlink = 0;
priv->speed = 0;
@@ -318,7 +313,8 @@ static int stmmac_init_phy(struct net_device *dev)
}
snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
- snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, bus_id, priv->phy_addr);
+ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+ priv->phy_addr);
pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
@@ -508,8 +504,8 @@ static void init_dma_desc_rings(struct net_device *dev)
priv->cur_tx = 0;
/* Clear the Rx/Tx descriptors */
- priv->mac_type->ops->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
- priv->mac_type->ops->init_tx_desc(priv->dma_tx, txsize);
+ priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
+ priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);
if (netif_msg_hw(priv)) {
pr_info("RX descriptor ring:\n");
@@ -544,8 +540,8 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
struct dma_desc *p = priv->dma_tx + i;
if (p->des2)
dma_unmap_single(priv->device, p->des2,
- priv->mac_type->ops->get_tx_len(p),
- DMA_TO_DEVICE);
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
dev_kfree_skb_any(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
}
@@ -575,50 +571,6 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
}
/**
- * stmmac_dma_start_tx
- * @ioaddr: device I/O address
- * Description: this function starts the DMA tx process.
- */
-static void stmmac_dma_start_tx(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + DMA_CONTROL);
- value |= DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CONTROL);
- return;
-}
-
-static void stmmac_dma_stop_tx(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + DMA_CONTROL);
- value &= ~DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CONTROL);
- return;
-}
-
-/**
- * stmmac_dma_start_rx
- * @ioaddr: device I/O address
- * Description: this function starts the DMA rx process.
- */
-static void stmmac_dma_start_rx(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + DMA_CONTROL);
- value |= DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CONTROL);
-
- return;
-}
-
-static void stmmac_dma_stop_rx(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + DMA_CONTROL);
- value &= ~DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CONTROL);
-
- return;
-}
-
-/**
* stmmac_dma_operation_mode - HW DMA operation mode
* @priv : pointer to the private device structure.
* Description: it sets the DMA operation mode: tx/rx DMA thresholds
@@ -629,18 +581,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
if (!priv->is_gmac) {
/* MAC 10/100 */
- priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 0);
+ priv->hw->dma->dma_mode(priv->dev->base_addr, tc, 0);
priv->tx_coe = NO_HW_CSUM;
} else {
if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) {
- priv->mac_type->ops->dma_mode(priv->dev->base_addr,
- SF_DMA_MODE, SF_DMA_MODE);
+ priv->hw->dma->dma_mode(priv->dev->base_addr,
+ SF_DMA_MODE, SF_DMA_MODE);
tc = SF_DMA_MODE;
priv->tx_coe = HW_CSUM;
} else {
/* Checksum computation is performed in software. */
- priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc,
- SF_DMA_MODE);
+ priv->hw->dma->dma_mode(priv->dev->base_addr, tc,
+ SF_DMA_MODE);
priv->tx_coe = NO_HW_CSUM;
}
}
@@ -649,88 +601,6 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
return;
}
-#ifdef STMMAC_DEBUG
-/**
- * show_tx_process_state
- * @status: tx descriptor status field
- * Description: it shows the Transmit Process State for CSR5[22:20]
- */
-static void show_tx_process_state(unsigned int status)
-{
- unsigned int state;
- state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
-
- switch (state) {
- case 0:
- pr_info("- TX (Stopped): Reset or Stop command\n");
- break;
- case 1:
- pr_info("- TX (Running):Fetching the Tx desc\n");
- break;
- case 2:
- pr_info("- TX (Running): Waiting for end of tx\n");
- break;
- case 3:
- pr_info("- TX (Running): Reading the data "
- "and queuing the data into the Tx buf\n");
- break;
- case 6:
- pr_info("- TX (Suspended): Tx Buff Underflow "
- "or an unavailable Transmit descriptor\n");
- break;
- case 7:
- pr_info("- TX (Running): Closing Tx descriptor\n");
- break;
- default:
- break;
- }
- return;
-}
-
-/**
- * show_rx_process_state
- * @status: rx descriptor status field
- * Description: it shows the Receive Process State for CSR5[19:17]
- */
-static void show_rx_process_state(unsigned int status)
-{
- unsigned int state;
- state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
-
- switch (state) {
- case 0:
- pr_info("- RX (Stopped): Reset or Stop command\n");
- break;
- case 1:
- pr_info("- RX (Running): Fetching the Rx desc\n");
- break;
- case 2:
- pr_info("- RX (Running):Checking for end of pkt\n");
- break;
- case 3:
- pr_info("- RX (Running): Waiting for Rx pkt\n");
- break;
- case 4:
- pr_info("- RX (Suspended): Unavailable Rx buf\n");
- break;
- case 5:
- pr_info("- RX (Running): Closing Rx descriptor\n");
- break;
- case 6:
- pr_info("- RX(Running): Flushing the current frame"
- " from the Rx buf\n");
- break;
- case 7:
- pr_info("- RX (Running): Queuing the Rx frame"
- " from the Rx buf into memory\n");
- break;
- default:
- break;
- }
- return;
-}
-#endif
-
/**
* stmmac_tx:
* @priv: private driver structure
@@ -748,16 +618,16 @@ static void stmmac_tx(struct stmmac_priv *priv)
struct dma_desc *p = priv->dma_tx + entry;
/* Check if the descriptor is owned by the DMA. */
- if (priv->mac_type->ops->get_tx_owner(p))
+ if (priv->hw->desc->get_tx_owner(p))
break;
/* Verify tx error by looking at the last segment */
- last = priv->mac_type->ops->get_tx_ls(p);
+ last = priv->hw->desc->get_tx_ls(p);
if (likely(last)) {
int tx_error =
- priv->mac_type->ops->tx_status(&priv->dev->stats,
- &priv->xstats,
- p, ioaddr);
+ priv->hw->desc->tx_status(&priv->dev->stats,
+ &priv->xstats, p,
+ ioaddr);
if (likely(tx_error == 0)) {
priv->dev->stats.tx_packets++;
priv->xstats.tx_pkt_n++;
@@ -769,7 +639,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
if (likely(p->des2))
dma_unmap_single(priv->device, p->des2,
- priv->mac_type->ops->get_tx_len(p),
+ priv->hw->desc->get_tx_len(p),
DMA_TO_DEVICE);
if (unlikely(p->des3))
p->des3 = 0;
@@ -790,7 +660,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
priv->tx_skbuff[entry] = NULL;
}
- priv->mac_type->ops->release_tx_desc(p);
+ priv->hw->desc->release_tx_desc(p);
entry = (++priv->dirty_tx) % txsize;
}
@@ -814,7 +684,7 @@ static inline void stmmac_enable_irq(struct stmmac_priv *priv)
priv->tm->timer_start(tmrate);
else
#endif
- writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA);
+ priv->hw->dma->enable_dma_irq(priv->dev->base_addr);
}
static inline void stmmac_disable_irq(struct stmmac_priv *priv)
@@ -824,7 +694,7 @@ static inline void stmmac_disable_irq(struct stmmac_priv *priv)
priv->tm->timer_stop();
else
#endif
- writel(0, priv->dev->base_addr + DMA_INTR_ENA);
+ priv->hw->dma->disable_dma_irq(priv->dev->base_addr);
}
static int stmmac_has_work(struct stmmac_priv *priv)
@@ -832,7 +702,7 @@ static int stmmac_has_work(struct stmmac_priv *priv)
unsigned int has_work = 0;
int rxret, tx_work = 0;
- rxret = priv->mac_type->ops->get_rx_owner(priv->dma_rx +
+ rxret = priv->hw->desc->get_rx_owner(priv->dma_rx +
(priv->cur_rx % priv->dma_rx_size));
if (priv->dirty_tx != priv->cur_tx)
@@ -883,12 +753,12 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
{
netif_stop_queue(priv->dev);
- stmmac_dma_stop_tx(priv->dev->base_addr);
+ priv->hw->dma->stop_tx(priv->dev->base_addr);
dma_free_tx_skbufs(priv);
- priv->mac_type->ops->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+ priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
priv->dirty_tx = 0;
priv->cur_tx = 0;
- stmmac_dma_start_tx(priv->dev->base_addr);
+ priv->hw->dma->start_tx(priv->dev->base_addr);
priv->dev->stats.tx_errors++;
netif_wake_queue(priv->dev);
@@ -896,95 +766,27 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
return;
}
-/**
- * stmmac_dma_interrupt - Interrupt handler for the driver
- * @dev: net device structure
- * Description: Interrupt handler for the driver (DMA).
- */
-static void stmmac_dma_interrupt(struct net_device *dev)
-{
- unsigned long ioaddr = dev->base_addr;
- struct stmmac_priv *priv = netdev_priv(dev);
- /* read the status register (CSR5) */
- u32 intr_status = readl(ioaddr + DMA_STATUS);
-
- DBG(intr, INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
-#ifdef STMMAC_DEBUG
- /* It displays the DMA transmit process state (CSR5 register) */
- if (netif_msg_tx_done(priv))
- show_tx_process_state(intr_status);
- if (netif_msg_rx_status(priv))
- show_rx_process_state(intr_status);
-#endif
- /* ABNORMAL interrupts */
- if (unlikely(intr_status & DMA_STATUS_AIS)) {
- DBG(intr, INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
- if (unlikely(intr_status & DMA_STATUS_UNF)) {
- DBG(intr, INFO, "transmit underflow\n");
- if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
- /* Try to bump up the threshold */
- tc += 64;
- priv->mac_type->ops->dma_mode(ioaddr, tc,
- SF_DMA_MODE);
- priv->xstats.threshold = tc;
- }
- stmmac_tx_err(priv);
- priv->xstats.tx_undeflow_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_TJT)) {
- DBG(intr, INFO, "transmit jabber\n");
- priv->xstats.tx_jabber_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_OVF)) {
- DBG(intr, INFO, "recv overflow\n");
- priv->xstats.rx_overflow_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_RU)) {
- DBG(intr, INFO, "receive buffer unavailable\n");
- priv->xstats.rx_buf_unav_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_RPS)) {
- DBG(intr, INFO, "receive process stopped\n");
- priv->xstats.rx_process_stopped_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_RWT)) {
- DBG(intr, INFO, "receive watchdog\n");
- priv->xstats.rx_watchdog_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_ETI)) {
- DBG(intr, INFO, "transmit early interrupt\n");
- priv->xstats.tx_early_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_TPS)) {
- DBG(intr, INFO, "transmit process stopped\n");
- priv->xstats.tx_process_stopped_irq++;
- stmmac_tx_err(priv);
- }
- if (unlikely(intr_status & DMA_STATUS_FBI)) {
- DBG(intr, INFO, "fatal bus error\n");
- priv->xstats.fatal_bus_error_irq++;
- stmmac_tx_err(priv);
+static void stmmac_dma_interrupt(struct stmmac_priv *priv)
+{
+ unsigned long ioaddr = priv->dev->base_addr;
+ int status;
+
+ status = priv->hw->dma->dma_interrupt(priv->dev->base_addr,
+ &priv->xstats);
+ if (likely(status == handle_tx_rx))
+ _stmmac_schedule(priv);
+
+ else if (unlikely(status == tx_hard_error_bump_tc)) {
+ /* Try to bump up the dma threshold on this failure */
+ if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
+ tc += 64;
+ priv->hw->dma->dma_mode(ioaddr, tc, SF_DMA_MODE);
+ priv->xstats.threshold = tc;
}
- }
-
- /* TX/RX NORMAL interrupts */
- if (intr_status & DMA_STATUS_NIS) {
- priv->xstats.normal_irq_n++;
- if (likely((intr_status & DMA_STATUS_RI) ||
- (intr_status & (DMA_STATUS_TI))))
- _stmmac_schedule(priv);
- }
-
- /* Optional hardware blocks, interrupts should be disabled */
- if (unlikely(intr_status &
- (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
- pr_info("%s: unexpected status %08x\n", __func__, intr_status);
-
- /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
- writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
-
- DBG(intr, INFO, "\n\n");
+ stmmac_tx_err(priv);
+ } else if (unlikely(status == tx_hard_error))
+ stmmac_tx_err(priv);
return;
}
@@ -1058,17 +860,20 @@ static int stmmac_open(struct net_device *dev)
init_dma_desc_rings(dev);
/* DMA initialization and SW reset */
- if (unlikely(priv->mac_type->ops->dma_init(ioaddr,
- priv->pbl, priv->dma_tx_phy, priv->dma_rx_phy) < 0)) {
+ if (unlikely(priv->hw->dma->init(ioaddr, priv->pbl, priv->dma_tx_phy,
+ priv->dma_rx_phy) < 0)) {
pr_err("%s: DMA initialization failed\n", __func__);
return -1;
}
/* Copy the MAC addr into the HW */
- priv->mac_type->ops->set_umac_addr(ioaddr, dev->dev_addr, 0);
+ priv->hw->mac->set_umac_addr(ioaddr, dev->dev_addr, 0);
+ /* If required, perform hw setup of the bus. */
+ if (priv->bus_setup)
+ priv->bus_setup(ioaddr);
/* Initialize the MAC Core */
- priv->mac_type->ops->core_init(ioaddr);
+ priv->hw->mac->core_init(ioaddr);
priv->shutdown = 0;
@@ -1089,16 +894,16 @@ static int stmmac_open(struct net_device *dev)
/* Start the ball rolling... */
DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
- stmmac_dma_start_tx(ioaddr);
- stmmac_dma_start_rx(ioaddr);
+ priv->hw->dma->start_tx(ioaddr);
+ priv->hw->dma->start_rx(ioaddr);
#ifdef CONFIG_STMMAC_TIMER
priv->tm->timer_start(tmrate);
#endif
/* Dump DMA/MAC registers */
if (netif_msg_hw(priv)) {
- priv->mac_type->ops->dump_mac_regs(ioaddr);
- priv->mac_type->ops->dump_dma_regs(ioaddr);
+ priv->hw->mac->dump_regs(ioaddr);
+ priv->hw->dma->dump_regs(ioaddr);
}
if (priv->phydev)
@@ -1142,8 +947,8 @@ static int stmmac_release(struct net_device *dev)
free_irq(dev->irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
- stmmac_dma_stop_tx(dev->base_addr);
- stmmac_dma_stop_rx(dev->base_addr);
+ priv->hw->dma->stop_tx(dev->base_addr);
+ priv->hw->dma->stop_rx(dev->base_addr);
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv);
@@ -1214,8 +1019,8 @@ static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
desc->des2 = dma_map_single(priv->device, skb->data,
BUF_SIZE_8KiB, DMA_TO_DEVICE);
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->mac_type->ops->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
- csum_insertion);
+ priv->hw->desc->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
+ csum_insertion);
entry = (++priv->cur_tx) % txsize;
desc = priv->dma_tx + entry;
@@ -1224,16 +1029,16 @@ static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
skb->data + BUF_SIZE_8KiB,
buf2_size, DMA_TO_DEVICE);
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->mac_type->ops->prepare_tx_desc(desc, 0,
- buf2_size, csum_insertion);
- priv->mac_type->ops->set_tx_owner(desc);
+ priv->hw->desc->prepare_tx_desc(desc, 0, buf2_size,
+ csum_insertion);
+ priv->hw->desc->set_tx_owner(desc);
priv->tx_skbuff[entry] = NULL;
} else {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
- csum_insertion);
+ priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
+ csum_insertion);
}
return entry;
}
@@ -1301,8 +1106,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int nopaged_len = skb_headlen(skb);
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
- priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
- csum_insertion);
+ priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
+ csum_insertion);
}
for (i = 0; i < nfrags; i++) {
@@ -1317,21 +1122,20 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
frag->page_offset,
len, DMA_TO_DEVICE);
priv->tx_skbuff[entry] = NULL;
- priv->mac_type->ops->prepare_tx_desc(desc, 0, len,
- csum_insertion);
- priv->mac_type->ops->set_tx_owner(desc);
+ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
+ priv->hw->desc->set_tx_owner(desc);
}
/* Interrupt on completition only for the latest segment */
- priv->mac_type->ops->close_tx_desc(desc);
+ priv->hw->desc->close_tx_desc(desc);
#ifdef CONFIG_STMMAC_TIMER
/* Clean IC while using timer */
if (likely(priv->tm->enable))
- priv->mac_type->ops->clear_tx_ic(desc);
+ priv->hw->desc->clear_tx_ic(desc);
#endif
/* To avoid raise condition */
- priv->mac_type->ops->set_tx_owner(first);
+ priv->hw->desc->set_tx_owner(first);
priv->cur_tx++;
@@ -1353,8 +1157,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += skb->len;
- /* CSR1 enables the transmit DMA to check for new descriptor */
- writel(1, dev->base_addr + DMA_XMT_POLL_DEMAND);
+ priv->hw->dma->enable_dma_transmission(dev->base_addr);
return NETDEV_TX_OK;
}
@@ -1391,7 +1194,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
}
RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
}
- priv->mac_type->ops->set_rx_owner(p + entry);
+ priv->hw->desc->set_rx_owner(p + entry);
}
return;
}
@@ -1412,7 +1215,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
}
#endif
count = 0;
- while (!priv->mac_type->ops->get_rx_owner(p)) {
+ while (!priv->hw->desc->get_rx_owner(p)) {
int status;
if (count >= limit)
@@ -1425,15 +1228,14 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
prefetch(p_next);
/* read the status of the incoming frame */
- status = (priv->mac_type->ops->rx_status(&priv->dev->stats,
- &priv->xstats, p));
+ status = (priv->hw->desc->rx_status(&priv->dev->stats,
+ &priv->xstats, p));
if (unlikely(status == discard_frame))
priv->dev->stats.rx_errors++;
else {
struct sk_buff *skb;
/* Length should omit the CRC */
- int frame_len =
- priv->mac_type->ops->get_rx_frame_len(p) - 4;
+ int frame_len = priv->hw->desc->get_rx_frame_len(p) - 4;
#ifdef STMMAC_RX_DEBUG
if (frame_len > ETH_FRAME_LEN)
@@ -1569,7 +1371,7 @@ static void stmmac_multicast_list(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
spin_lock(&priv->lock);
- priv->mac_type->ops->set_filter(dev);
+ priv->hw->mac->set_filter(dev);
spin_unlock(&priv->lock);
return;
}
@@ -1623,9 +1425,10 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
if (priv->is_gmac) {
unsigned long ioaddr = dev->base_addr;
/* To handle GMAC own interrupts */
- priv->mac_type->ops->host_irq_status(ioaddr);
+ priv->hw->mac->host_irq_status(ioaddr);
}
- stmmac_dma_interrupt(dev);
+
+ stmmac_dma_interrupt(priv);
return IRQ_HANDLED;
}
@@ -1744,7 +1547,7 @@ static int stmmac_probe(struct net_device *dev)
netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
/* Get the MAC address */
- priv->mac_type->ops->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
+ priv->hw->mac->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
if (!is_valid_ether_addr(dev->dev_addr))
pr_warning("\tno valid MAC address;"
@@ -1779,16 +1582,16 @@ static int stmmac_mac_device_setup(struct net_device *dev)
struct mac_device_info *device;
if (priv->is_gmac)
- device = gmac_setup(ioaddr);
+ device = dwmac1000_setup(ioaddr);
else
- device = mac100_setup(ioaddr);
+ device = dwmac100_setup(ioaddr);
if (!device)
return -ENOMEM;
- priv->mac_type = device;
+ priv->hw = device;
- priv->wolenabled = priv->mac_type->hw.pmt; /* PMT supported */
+ priv->wolenabled = priv->hw->pmt; /* PMT supported */
if (priv->wolenabled == PMT_SUPPORTED)
priv->wolopts = WAKE_MAGIC; /* Magic Frame */
@@ -1797,8 +1600,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
static int stmmacphy_dvr_probe(struct platform_device *pdev)
{
- struct plat_stmmacphy_data *plat_dat;
- plat_dat = (struct plat_stmmacphy_data *)((pdev->dev).platform_data);
+ struct plat_stmmacphy_data *plat_dat = pdev->dev.platform_data;
pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n",
plat_dat->bus_id);
@@ -1830,9 +1632,7 @@ static struct platform_driver stmmacphy_driver = {
static int stmmac_associate_phy(struct device *dev, void *data)
{
struct stmmac_priv *priv = (struct stmmac_priv *)data;
- struct plat_stmmacphy_data *plat_dat;
-
- plat_dat = (struct plat_stmmacphy_data *)(dev->platform_data);
+ struct plat_stmmacphy_data *plat_dat = dev->platform_data;
DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__,
plat_dat->bus_id);
@@ -1922,7 +1722,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->device = &(pdev->dev);
priv->dev = ndev;
- plat_dat = (struct plat_stmmacenet_data *)((pdev->dev).platform_data);
+ plat_dat = pdev->dev.platform_data;
priv->bus_id = plat_dat->bus_id;
priv->pbl = plat_dat->pbl; /* TLI */
priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
@@ -1932,6 +1732,11 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
/* Set the I/O base addr */
ndev->base_addr = (unsigned long)addr;
+ /* Verify embedded resource for the platform */
+ ret = stmmac_claim_resource(pdev);
+ if (ret < 0)
+ goto out;
+
/* MAC HW revice detection */
ret = stmmac_mac_device_setup(ndev);
if (ret < 0)
@@ -1952,6 +1757,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
}
priv->fix_mac_speed = plat_dat->fix_mac_speed;
+ priv->bus_setup = plat_dat->bus_setup;
priv->bsp_priv = plat_dat->bsp_priv;
pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
@@ -1986,12 +1792,13 @@ out:
static int stmmac_dvr_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
struct resource *res;
pr_info("%s:\n\tremoving driver", __func__);
- stmmac_dma_stop_rx(ndev->base_addr);
- stmmac_dma_stop_tx(ndev->base_addr);
+ priv->hw->dma->stop_rx(ndev->base_addr);
+ priv->hw->dma->stop_tx(ndev->base_addr);
stmmac_mac_disable_rx(ndev->base_addr);
stmmac_mac_disable_tx(ndev->base_addr);
@@ -2038,21 +1845,20 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
napi_disable(&priv->napi);
/* Stop TX/RX DMA */
- stmmac_dma_stop_tx(dev->base_addr);
- stmmac_dma_stop_rx(dev->base_addr);
+ priv->hw->dma->stop_tx(dev->base_addr);
+ priv->hw->dma->stop_rx(dev->base_addr);
/* Clear the Rx/Tx descriptors */
- priv->mac_type->ops->init_rx_desc(priv->dma_rx,
- priv->dma_rx_size, dis_ic);
- priv->mac_type->ops->init_tx_desc(priv->dma_tx,
- priv->dma_tx_size);
+ priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
+ dis_ic);
+ priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
stmmac_mac_disable_tx(dev->base_addr);
if (device_may_wakeup(&(pdev->dev))) {
/* Enable Power down mode by programming the PMT regs */
if (priv->wolenabled == PMT_SUPPORTED)
- priv->mac_type->ops->pmt(dev->base_addr,
- priv->wolopts);
+ priv->hw->mac->pmt(dev->base_addr,
+ priv->wolopts);
} else {
stmmac_mac_disable_rx(dev->base_addr);
}
@@ -2093,15 +1899,15 @@ static int stmmac_resume(struct platform_device *pdev)
* from another devices (e.g. serial console). */
if (device_may_wakeup(&(pdev->dev)))
if (priv->wolenabled == PMT_SUPPORTED)
- priv->mac_type->ops->pmt(dev->base_addr, 0);
+ priv->hw->mac->pmt(dev->base_addr, 0);
netif_device_attach(dev);
/* Enable the MAC and DMA */
stmmac_mac_enable_rx(ioaddr);
stmmac_mac_enable_tx(ioaddr);
- stmmac_dma_start_tx(ioaddr);
- stmmac_dma_start_rx(ioaddr);
+ priv->hw->dma->start_tx(ioaddr);
+ priv->hw->dma->start_rx(ioaddr);
#ifdef CONFIG_STMMAC_TIMER
priv->tm->timer_start(tmrate);
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index 8498552a22fc..fffe1d037fe6 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -24,7 +24,6 @@
Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <linux/netdevice.h>
#include <linux/mii.h>
#include <linux/phy.h>
@@ -48,8 +47,8 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long ioaddr = ndev->base_addr;
- unsigned int mii_address = priv->mac_type->hw.mii.addr;
- unsigned int mii_data = priv->mac_type->hw.mii.data;
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
int data;
u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
@@ -80,8 +79,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long ioaddr = ndev->base_addr;
- unsigned int mii_address = priv->mac_type->hw.mii.addr;
- unsigned int mii_data = priv->mac_type->hw.mii.data;
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
u16 value =
(((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
@@ -112,7 +111,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long ioaddr = ndev->base_addr;
- unsigned int mii_address = priv->mac_type->hw.mii.addr;
+ unsigned int mii_address = priv->hw->mii.addr;
if (priv->phy_reset) {
pr_debug("stmmac_mdio_reset: calling phy_reset\n");
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index d58e1891ca60..0c972e560cf3 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -206,7 +206,7 @@ IVc. Errata
#define USE_IO_OPS 1
#endif
-static const struct pci_device_id sundance_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
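Note: this and the following ID-table hunks (sungem, sunhme, tc35815, tehuti, tg3) all switch the open-coded static struct pci_device_id arrays to DEFINE_PCI_DEVICE_TABLE(). At the time of this merge the macro in include/linux/pci.h expands to roughly

	#define DEFINE_PCI_DEVICE_TABLE(_table) \
		const struct pci_device_id _table[] __devinitconst

so the tables become const and are placed in the devinit-const section; the table entries themselves are unchanged.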
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index b571a1babab9..b55ceb88d93f 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -107,7 +107,7 @@ MODULE_LICENSE("GPL");
#define GEM_MODULE_NAME "gem"
#define PFX GEM_MODULE_NAME ": "
-static struct pci_device_id gem_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = {
{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 6762f1c6ec8a..76ccd31cbf50 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -3211,7 +3211,7 @@ static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
dev_set_drvdata(&pdev->dev, NULL);
}
-static struct pci_device_id happymeal_pci_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
{ } /* Terminating entry */
};
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 75a669d48e5e..033408f589fb 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -65,7 +65,7 @@ static const struct {
{ "TOSHIBA TC35815/TX4939" },
};
-static const struct pci_device_id tc35815_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tc35815_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF },
{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU },
{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 },
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 80b404f2b938..b907bee31fd5 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -64,7 +64,7 @@
#include "tehuti.h"
-static struct pci_device_id __devinitdata bdx_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(bdx_pci_tbl) = {
{0x1FC9, 0x3009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x1FC9, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x1FC9, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 3a74d2168598..7195bdec17f3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2009 Broadcom Corporation.
+ * Copyright (C) 2005-2010 Broadcom Corporation.
*
* Firmware is:
* Derived from proprietary unpublished source code,
@@ -68,8 +68,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.105"
-#define DRV_MODULE_RELDATE "December 2, 2009"
+#define DRV_MODULE_VERSION "3.106"
+#define DRV_MODULE_RELDATE "January 12, 2010"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -174,7 +174,7 @@ static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
-static struct pci_device_id tg3_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
@@ -244,6 +244,12 @@ static struct pci_device_id tg3_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -1037,7 +1043,11 @@ static void tg3_mdio_start(struct tg3 *tp)
else
tp->phy_addr = 1;
- is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
+ is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
+ else
+ is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
+ TG3_CPMU_PHY_STRAP_IS_SERDES;
if (is_serdes)
tp->phy_addr += 7;
} else
@@ -1560,7 +1570,9 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
u32 reg;
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
return;
if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
@@ -1935,6 +1947,10 @@ static int tg3_phy_reset(struct tg3 *tp)
}
}
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
+ return 0;
+
tg3_phy_apply_otp(tp);
if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
@@ -2015,7 +2031,9 @@ static void tg3_frob_aux_power(struct tg3 *tp)
{
struct tg3 *tp_peer = tp;
- if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
+ /* The GPIOs do something completely different on 57765. */
+ if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
return;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
@@ -4693,8 +4711,9 @@ next_pkt:
(*post_ptr)++;
if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
- u32 idx = *post_ptr % TG3_RX_RING_SIZE;
- tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx);
+ tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+ tpr->rx_std_prod_idx);
work_mask &= ~RXD_OPAQUE_RING_STD;
rx_std_posted = 0;
}
@@ -7434,10 +7453,13 @@ static void tg3_rings_reset(struct tg3 *tp)
for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
tp->napi[i].tx_prod = 0;
tp->napi[i].tx_cons = 0;
- tw32_mailbox(tp->napi[i].prodmbox, 0);
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
+ tw32_mailbox(tp->napi[i].prodmbox, 0);
tw32_rx_mbox(tp->napi[i].consmbox, 0);
tw32_mailbox_f(tp->napi[i].int_mbox, 1);
}
+ if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
+ tw32_mailbox(tp->napi[0].prodmbox, 0);
} else {
tp->napi[0].tx_prod = 0;
tp->napi[0].tx_cons = 0;
@@ -7569,6 +7591,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
}
+ if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
+ u32 grc_mode = tr32(GRC_MODE);
+
+ /* Access the lower 1K of PL PCIE block registers. */
+ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+ tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+
+ val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
+ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
+ val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
+
+ tw32(GRC_MODE, grc_mode);
+ }
+
/* This works around an issue with Athlon chipsets on
* B3 tigon3 silicon. This bit has no effect on any
* other revision. But do not set this on PCI Express
@@ -7742,7 +7778,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
((u64) tpr->rx_std_mapping >> 32));
tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
((u64) tpr->rx_std_mapping & 0xffffffff));
- if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_BUFFER_DESC);
@@ -7767,7 +7803,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
(RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
BDINFO_FLAGS_USE_EXT_RECV);
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_JUMBO_BUFFER_DESC);
} else {
@@ -8138,7 +8174,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Prevent chip from dropping frames when flow control
* is enabled.
*/
- tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ val = 1;
+ else
+ val = 2;
+ tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
@@ -10635,12 +10675,27 @@ static int tg3_test_memory(struct tg3 *tp)
{ 0x00008000, 0x01000},
{ 0x00010000, 0x01000},
{ 0xffffffff, 0x00000}
+ }, mem_tbl_5717[] = {
+ { 0x00000200, 0x00008},
+ { 0x00010000, 0x0a000},
+ { 0x00020000, 0x13c00},
+ { 0xffffffff, 0x00000}
+ }, mem_tbl_57765[] = {
+ { 0x00000200, 0x00008},
+ { 0x00004000, 0x00800},
+ { 0x00006000, 0x09800},
+ { 0x00010000, 0x0a000},
+ { 0xffffffff, 0x00000}
};
struct mem_entry *mem_tbl;
int err = 0;
int i;
- if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ mem_tbl = mem_tbl_5717;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ mem_tbl = mem_tbl_57765;
+ else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
mem_tbl = mem_tbl_5755;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
mem_tbl = mem_tbl_5906;
@@ -12122,7 +12177,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tp->phy_id = eeprom_phy_id;
if (eeprom_phy_serdes) {
- if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
+ if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
else
tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
@@ -13096,6 +13152,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
+ } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
+ tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
}
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
@@ -13284,7 +13342,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
@@ -13384,6 +13443,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (err)
return err;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 ||
+ (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
+ return -ENOTSUPP;
+
/* Initialize data/descriptor byte/word swapping. */
val = tr32(GRC_MODE);
val &= GRC_MODE_HOST_STACKUP;
@@ -14075,9 +14139,22 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
- if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ tp->bufmgr_config.mbuf_read_dma_low_water =
+ DEFAULT_MB_RDMA_LOW_WATER_5705;
+ tp->bufmgr_config.mbuf_mac_rx_low_water =
+ DEFAULT_MB_MACRX_LOW_WATER_57765;
+ tp->bufmgr_config.mbuf_high_water =
+ DEFAULT_MB_HIGH_WATER_57765;
+
+ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
+ DEFAULT_MB_RDMA_LOW_WATER_5705;
+ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
+ DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
+ tp->bufmgr_config.mbuf_high_water_jumbo =
+ DEFAULT_MB_HIGH_WATER_JUMBO_57765;
+ } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
tp->bufmgr_config.mbuf_read_dma_low_water =
DEFAULT_MB_RDMA_LOW_WATER_5705;
tp->bufmgr_config.mbuf_mac_rx_low_water =
@@ -14137,7 +14214,9 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
case PHY_ID_BCM5756: return "5722/5756";
case PHY_ID_BCM5906: return "5906";
case PHY_ID_BCM5761: return "5761";
- case PHY_ID_BCM5717: return "5717";
+ case PHY_ID_BCM5718C: return "5718C";
+ case PHY_ID_BCM5718S: return "5718S";
+ case PHY_ID_BCM57765: return "57765";
case PHY_ID_BCM8002: return "8002/serdes";
case 0: return "serdes";
default: return "unknown";
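The new TG3_FLG3_L1PLLPD_EN path in tg3_reset_hw() opens a register window before touching the PL PCIE block. A hedged sketch that consolidates that sequence into one helper, using only tr32/tw32 and the register names added in the tg3.h hunks below; the wrapper function itself is illustrative and not part of the patch:

	/* Sketch only: enable L1 PLL power-down via the PL PCIE window. */
	static void tg3_enable_l1pllpd(struct tg3 *tp)
	{
		u32 grc_mode = tr32(GRC_MODE);
		u32 val;

		/* Expose the lower 1K of PL PCIE block registers. */
		tw32(GRC_MODE, (grc_mode & ~GRC_MODE_PCIE_PORT_MASK) |
			       GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		/* Restore the original window selection. */
		tw32(GRC_MODE, grc_mode);
	}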
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index cd30889650f8..e7f6214a1680 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -4,6 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (C) 2007-2010 Broadcom Corporation.
*/
#ifndef _T3_H
@@ -1054,6 +1055,8 @@
#define CPMU_MUTEX_REQ_DRIVER 0x00001000
#define TG3_CPMU_MUTEX_GNT 0x00003660
#define CPMU_MUTEX_GNT_DRIVER 0x00001000
+#define TG3_CPMU_PHY_STRAP 0x00003664
+#define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020
/* 0x3664 --> 0x3800 unused */
/* Mbuf cluster free registers */
@@ -1203,14 +1206,18 @@
#define DEFAULT_MB_MACRX_LOW_WATER 0x00000020
#define DEFAULT_MB_MACRX_LOW_WATER_5705 0x00000010
#define DEFAULT_MB_MACRX_LOW_WATER_5906 0x00000004
+#define DEFAULT_MB_MACRX_LOW_WATER_57765 0x0000002a
#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO 0x00000098
#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780 0x0000004b
+#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765 0x0000007e
#define BUFMGR_MB_HIGH_WATER 0x00004418
#define DEFAULT_MB_HIGH_WATER 0x00000060
#define DEFAULT_MB_HIGH_WATER_5705 0x00000060
#define DEFAULT_MB_HIGH_WATER_5906 0x00000010
+#define DEFAULT_MB_HIGH_WATER_57765 0x000000a0
#define DEFAULT_MB_HIGH_WATER_JUMBO 0x0000017c
#define DEFAULT_MB_HIGH_WATER_JUMBO_5780 0x00000096
+#define DEFAULT_MB_HIGH_WATER_JUMBO_57765 0x000000ea
#define BUFMGR_RX_MB_ALLOC_REQ 0x0000441c
#define BUFMGR_MB_ALLOC_BIT 0x10000000
#define BUFMGR_RX_MB_ALLOC_RESP 0x00004420
@@ -1540,6 +1547,8 @@
#define GRC_MODE_HOST_SENDBDS 0x00020000
#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000
#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000
+#define GRC_MODE_PCIE_TL_SEL 0x00000000
+#define GRC_MODE_PCIE_PL_SEL 0x00400000
#define GRC_MODE_NO_RX_PHDR_CSUM 0x00800000
#define GRC_MODE_IRQ_ON_TX_CPU_ATTN 0x01000000
#define GRC_MODE_IRQ_ON_RX_CPU_ATTN 0x02000000
@@ -1547,7 +1556,13 @@
#define GRC_MODE_IRQ_ON_DMA_ATTN 0x08000000
#define GRC_MODE_IRQ_ON_FLOW_ATTN 0x10000000
#define GRC_MODE_4X_NIC_SEND_RINGS 0x20000000
+#define GRC_MODE_PCIE_DL_SEL 0x20000000
#define GRC_MODE_MCAST_FRM_ENABLE 0x40000000
+#define GRC_MODE_PCIE_HI_1K_EN 0x80000000
+#define GRC_MODE_PCIE_PORT_MASK (GRC_MODE_PCIE_TL_SEL | \
+ GRC_MODE_PCIE_PL_SEL | \
+ GRC_MODE_PCIE_DL_SEL | \
+ GRC_MODE_PCIE_HI_1K_EN)
#define GRC_MISC_CFG 0x00006804
#define GRC_MISC_CFG_CORECLK_RESET 0x00000001
#define GRC_MISC_CFG_PRESCALAR_MASK 0x000000fe
@@ -1801,6 +1816,11 @@
/* 0x7e74 --> 0x8000 unused */
+/* Alternate PCIE definitions */
+#define TG3_PCIE_TLDLPL_PORT 0x00007c00
+#define TG3_PCIE_PL_LO_PHYCTL1 0x00000004
+#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000
+
/* OTP bit definitions */
#define TG3_OTP_AGCTGT_MASK 0x000000e0
#define TG3_OTP_AGCTGT_SHIFT 1
@@ -2809,6 +2829,7 @@ struct tg3 {
#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000
#define TG3_FLG3_SHORT_DMA_BUG 0x00200000
#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000
+#define TG3_FLG3_L1PLLPD_EN 0x00800000
struct timer_list timer;
u16 timer_counter;
@@ -2875,7 +2896,9 @@ struct tg3 {
#define PHY_ID_BCM5756 0xbc050ed0
#define PHY_ID_BCM5784 0xbc050fa0
#define PHY_ID_BCM5761 0xbc050fd0
-#define PHY_ID_BCM5717 0x5c0d8a00
+#define PHY_ID_BCM5718C 0x5c0d8a00
+#define PHY_ID_BCM5718S 0xbc050ff0
+#define PHY_ID_BCM57765 0x5c0d8a40
#define PHY_ID_BCM5906 0xdc00ac40
#define PHY_ID_BCM8002 0x60010140
#define PHY_ID_INVALID 0xffffffff
@@ -2918,7 +2941,8 @@ struct tg3 {
(X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \
(X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \
(X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \
- (X) == PHY_ID_BCM5717 || (X) == PHY_ID_BCM8002)
+ (X) == PHY_ID_BCM5718C || (X) == PHY_ID_BCM5718S || \
+ (X) == PHY_ID_BCM57765 || (X) == PHY_ID_BCM8002)
struct tg3_hw_stats *hw_stats;
dma_addr_t stats_mapping;
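Taken together, the tg3.c and tg3.h hunks change how SerDes mode is detected on 5717 A0 silicon: the strap is read from the new CPMU register instead of SG_DIG_STATUS. A small sketch of that decision, using only names introduced or referenced in the hunks (the helper function is not part of the patch):

	/* Sketch: 5717 A0 reports SerDes via the CPMU strap register. */
	static bool tg3_phy_is_serdes(struct tg3 *tp)
	{
		if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0)
			return tr32(TG3_CPMU_PHY_STRAP) &
			       TG3_CPMU_PHY_STRAP_IS_SERDES;

		return tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
	}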
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index fabaeffb3155..3ec31dce99f9 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -254,7 +254,7 @@ static struct board {
{ "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
};
-static struct pci_device_id tlan_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
@@ -338,7 +338,7 @@ static int TLan_PhyInternalService( struct net_device * );
static int TLan_PhyDp83840aCheck( struct net_device * );
*/
-static int TLan_MiiReadReg( struct net_device *, u16, u16, u16 * );
+static bool TLan_MiiReadReg( struct net_device *, u16, u16, u16 * );
static void TLan_MiiSendData( u16, u32, unsigned );
static void TLan_MiiSync( u16 );
static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 );
@@ -2204,7 +2204,7 @@ TLan_ResetAdapter( struct net_device *dev )
u32 data;
u8 data8;
- priv->tlanFullDuplex = FALSE;
+ priv->tlanFullDuplex = false;
priv->phyOnline=0;
netif_carrier_off(dev);
@@ -2259,7 +2259,7 @@ TLan_ResetAdapter( struct net_device *dev )
TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a );
} else if ( priv->duplex == TLAN_DUPLEX_FULL ) {
TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 );
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
} else {
TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 );
}
@@ -2651,14 +2651,14 @@ static void TLan_PhyStartLink( struct net_device *dev )
TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000);
} else if ( priv->speed == TLAN_SPEED_10 &&
priv->duplex == TLAN_DUPLEX_FULL) {
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100);
} else if ( priv->speed == TLAN_SPEED_100 &&
priv->duplex == TLAN_DUPLEX_HALF) {
TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000);
} else if ( priv->speed == TLAN_SPEED_100 &&
priv->duplex == TLAN_DUPLEX_FULL) {
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100);
} else {
@@ -2695,7 +2695,7 @@ static void TLan_PhyStartLink( struct net_device *dev )
tctl &= ~TLAN_TC_AUISEL;
if ( priv->duplex == TLAN_DUPLEX_FULL ) {
control |= MII_GC_DUPLEX;
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
}
if ( priv->speed == TLAN_SPEED_100 ) {
control |= MII_GC_SPEEDSEL;
@@ -2750,9 +2750,9 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa );
mode = an_adv & an_lpa & 0x03E0;
if ( mode & 0x0100 ) {
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
} else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) {
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
}
if ( ( ! ( mode & 0x0180 ) ) &&
@@ -2855,8 +2855,8 @@ void TLan_PhyMonitor( struct net_device *dev )
* TLan_MiiReadReg
*
* Returns:
- * 0 if ack received ok
- * 1 otherwise.
+ * false if ack received ok
+ * true if no ack received or other error
*
* Parms:
* dev The device structure containing
@@ -2875,17 +2875,17 @@ void TLan_PhyMonitor( struct net_device *dev )
*
**************************************************************/
-static int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
+static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
{
u8 nack;
u16 sio, tmp;
u32 i;
- int err;
+ bool err;
int minten;
TLanPrivateInfo *priv = netdev_priv(dev);
unsigned long flags = 0;
- err = FALSE;
+ err = false;
outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
@@ -2918,7 +2918,7 @@ static int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
}
tmp = 0xffff;
- err = TRUE;
+ err = true;
} else { /* ACK, so read data */
for (tmp = 0, i = 0x8000; i; i >>= 1) {
TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index 4b82f283e985..d13ff12d7500 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -31,9 +31,6 @@
*
****************************************************************/
-#define FALSE 0
-#define TRUE 1
-
#define TLAN_MIN_FRAME_SIZE 64
#define TLAN_MAX_FRAME_SIZE 1600
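The tlan changes retire the driver-local TRUE/FALSE macros in favour of the kernel-wide bool type and flip TLan_MiiReadReg() to return it (false = ACK received, true = error). A compressed, illustrative-only sketch of that convention; the function below is made up to show the shape, it is not the driver's code:

	#include <linux/types.h>	/* bool, true, false replace TRUE/FALSE */

	/* Mirrors the new TLan_MiiReadReg() convention:
	 * false = PHY acknowledged and *val is valid, true = error/no ACK. */
	static bool example_mii_read(u16 reg, u16 *val)
	{
		if (reg > 31)
			return true;	/* error path */
		*val = 0xffff;		/* placeholder data */
		return false;		/* success */
	}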
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index cf552d1d9629..b0d7db9d8bb4 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -117,7 +117,7 @@ MODULE_PARM_DESC(message_level, "3c359: Level of reported messages") ;
* will be stuck with 1555 lines of hex #'s in the code.
*/
-static struct pci_device_id xl_pci_tbl[] =
+static DEFINE_PCI_DEVICE_TABLE(xl_pci_tbl) =
{
{PCI_VENDOR_ID_3COM,PCI_DEVICE_ID_3COM_3C359, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* terminate list */
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
index b9db1b5a58a3..515f122777ab 100644
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -45,7 +45,7 @@ static char version[] __devinitdata =
#define ABYSS_IO_EXTENT 64
-static struct pci_device_id abyss_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(abyss_pci_tbl) = {
{ PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_MK2,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_TOKEN_RING << 8, 0x00ffffff, },
{ } /* Terminating entry */
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index d6ccd59c7d07..3f9d5a25562e 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -146,7 +146,7 @@
static char version[] = "LanStreamer.c v0.4.0 03/08/01 - Mike Sullivan\n"
" v0.5.3 11/13/02 - Kent Yoder";
-static struct pci_device_id streamer_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(streamer_pci_tbl) = {
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR, PCI_ANY_ID, PCI_ANY_ID,},
{} /* terminating entry */
};
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index df32025c5132..f010a4dc5f19 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -172,7 +172,7 @@ module_param_array(message_level, int, NULL, 0) ;
static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
module_param_array(network_monitor, int, NULL, 0);
-static struct pci_device_id olympic_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(olympic_pci_tbl) = {
{PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
{ } /* Terminating Entry */
};
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index f92fe86fdcae..d4c7c0c0a3d6 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -57,7 +57,7 @@ static struct card_info card_info_table[] = {
{ {0x03, 0x01}, "3Com Token Link Velocity"},
};
-static struct pci_device_id tmspci_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tmspci_pci_tbl) = {
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_TR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ PCI_VENDOR_ID_TCONRAD, PCI_DEVICE_ID_TCONRAD_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
index 9f6742fad6ca..007d8e75666d 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/tulip/21142.c
@@ -43,8 +43,8 @@ void t21142_media_task(struct work_struct *work)
if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
csr12 |= 6;
if (tulip_debug > 2)
- printk(KERN_INFO"%s: 21143 negotiation status %8.8x, %s.\n",
- dev->name, csr12, medianame[dev->if_port]);
+ dev_info(&dev->dev, "21143 negotiation status %08x, %s\n",
+ csr12, medianame[dev->if_port]);
if (tulip_media_cap[dev->if_port] & MediaIsMII) {
if (tulip_check_duplex(dev) < 0) {
netif_carrier_off(dev);
@@ -56,23 +56,26 @@ void t21142_media_task(struct work_struct *work)
} else if (tp->nwayset) {
/* Don't screw up a negotiated session! */
if (tulip_debug > 1)
- printk(KERN_INFO"%s: Using NWay-set %s media, csr12 %8.8x.\n",
- dev->name, medianame[dev->if_port], csr12);
+ dev_info(&dev->dev,
+ "Using NWay-set %s media, csr12 %08x\n",
+ medianame[dev->if_port], csr12);
} else if (tp->medialock) {
;
} else if (dev->if_port == 3) {
if (csr12 & 2) { /* No 100mbps link beat, revert to 10mbps. */
if (tulip_debug > 1)
- printk(KERN_INFO"%s: No 21143 100baseTx link beat, %8.8x, "
- "trying NWay.\n", dev->name, csr12);
+ dev_info(&dev->dev,
+ "No 21143 100baseTx link beat, %08x, trying NWay\n",
+ csr12);
t21142_start_nway(dev);
next_tick = 3*HZ;
}
} else if ((csr12 & 0x7000) != 0x5000) {
/* Negotiation failed. Search media types. */
if (tulip_debug > 1)
- printk(KERN_INFO"%s: 21143 negotiation failed, status %8.8x.\n",
- dev->name, csr12);
+ dev_info(&dev->dev,
+ "21143 negotiation failed, status %08x\n",
+ csr12);
if (!(csr12 & 4)) { /* 10mbps link beat good. */
new_csr6 = 0x82420000;
dev->if_port = 0;
@@ -90,8 +93,8 @@ void t21142_media_task(struct work_struct *work)
iowrite32(1, ioaddr + CSR13);
}
if (tulip_debug > 1)
- printk(KERN_INFO"%s: Testing new 21143 media %s.\n",
- dev->name, medianame[dev->if_port]);
+ dev_info(&dev->dev, "Testing new 21143 media %s\n",
+ medianame[dev->if_port]);
if (new_csr6 != (tp->csr6 & ~0x00D5)) {
tp->csr6 &= 0x00D5;
tp->csr6 |= new_csr6;
@@ -119,8 +122,8 @@ void t21142_start_nway(struct net_device *dev)
tp->nway = tp->mediasense = 1;
tp->nwayset = tp->lpar = 0;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Restarting 21143 autonegotiation, csr14=%8.8x.\n",
- dev->name, csr14);
+ printk(KERN_DEBUG "%s: Restarting 21143 autonegotiation, csr14=%08x\n",
+ dev->name, csr14);
iowrite32(0x0001, ioaddr + CSR13);
udelay(100);
iowrite32(csr14, ioaddr + CSR14);
@@ -147,8 +150,9 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
csr12 |= 6;
if (tulip_debug > 1)
- printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, "
- "%8.8x.\n", dev->name, csr12, csr5, csr14);
+ dev_info(&dev->dev,
+ "21143 link status interrupt %08x, CSR5 %x, %08x\n",
+ csr12, csr5, csr14);
/* If NWay finished and we have a negotiated partner capability. */
if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) {
@@ -171,14 +175,15 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
if (tulip_debug > 1) {
if (tp->nwayset)
- printk(KERN_INFO "%s: Switching to %s based on link "
- "negotiation %4.4x & %4.4x = %4.4x.\n",
- dev->name, medianame[dev->if_port], tp->sym_advertise,
- tp->lpar, negotiated);
+ dev_info(&dev->dev,
+ "Switching to %s based on link negotiation %04x & %04x = %04x\n",
+ medianame[dev->if_port],
+ tp->sym_advertise, tp->lpar,
+ negotiated);
else
- printk(KERN_INFO "%s: Autonegotiation failed, using %s,"
- " link beat status %4.4x.\n",
- dev->name, medianame[dev->if_port], csr12);
+ dev_info(&dev->dev,
+ "Autonegotiation failed, using %s, link beat status %04x\n",
+ medianame[dev->if_port], csr12);
}
if (tp->mtable) {
@@ -201,14 +206,14 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
#if 0 /* Restart shouldn't be needed. */
iowrite32(tp->csr6 | RxOn, ioaddr + CSR6);
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Restarting Tx and Rx, CSR5 is %8.8x.\n",
- dev->name, ioread32(ioaddr + CSR5));
+ printk(KERN_DEBUG "%s: Restarting Tx and Rx, CSR5 is %08x\n",
+ dev->name, ioread32(ioaddr + CSR5));
#endif
tulip_start_rxtx(tp);
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 %8.8x.\n",
- dev->name, tp->csr6, ioread32(ioaddr + CSR6),
- ioread32(ioaddr + CSR12));
+ printk(KERN_DEBUG "%s: Setting CSR6 %08x/%x CSR12 %08x\n",
+ dev->name, tp->csr6, ioread32(ioaddr + CSR6),
+ ioread32(ioaddr + CSR12));
} else if ((tp->nwayset && (csr5 & 0x08000000) &&
(dev->if_port == 3 || dev->if_port == 5) &&
(csr12 & 2) == 2) ||
@@ -220,9 +225,9 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
add_timer(&tp->timer);
} else if (dev->if_port == 3 || dev->if_port == 5) {
if (tulip_debug > 1)
- printk(KERN_INFO"%s: 21143 %s link beat %s.\n",
- dev->name, medianame[dev->if_port],
- (csr12 & 2) ? "failed" : "good");
+ dev_info(&dev->dev, "21143 %s link beat %s\n",
+ medianame[dev->if_port],
+ (csr12 & 2) ? "failed" : "good");
if ((csr12 & 2) && ! tp->medialock) {
del_timer_sync(&tp->timer);
t21142_start_nway(dev);
@@ -232,21 +237,18 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
iowrite32(csr14 & ~0x080, ioaddr + CSR14);
} else if (dev->if_port == 0 || dev->if_port == 4) {
if ((csr12 & 4) == 0)
- printk(KERN_INFO"%s: 21143 10baseT link beat good.\n",
- dev->name);
+ dev_info(&dev->dev, "21143 10baseT link beat good\n");
} else if (!(csr12 & 4)) { /* 10mbps link beat good. */
if (tulip_debug)
- printk(KERN_INFO"%s: 21143 10mbps sensed media.\n",
- dev->name);
+ dev_info(&dev->dev, "21143 10mbps sensed media\n");
dev->if_port = 0;
} else if (tp->nwayset) {
if (tulip_debug)
- printk(KERN_INFO"%s: 21143 using NWay-set %s, csr6 %8.8x.\n",
- dev->name, medianame[dev->if_port], tp->csr6);
+ dev_info(&dev->dev, "21143 using NWay-set %s, csr6 %08x\n",
+ medianame[dev->if_port], tp->csr6);
} else { /* 100mbps link beat good. */
if (tulip_debug)
- printk(KERN_INFO"%s: 21143 100baseTx sensed media.\n",
- dev->name);
+ dev_info(&dev->dev, "21143 100baseTx sensed media\n");
dev->if_port = 3;
tp->csr6 = 0x838E0000 | (tp->csr6 & 0x20ff);
iowrite32(0x0003FF7F, ioaddr + CSR14);
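Most of the tulip logging hunks replace `printk(KERN_INFO "%s: ...", dev->name, ...)` with dev_info()/dev_warn()/dev_err(), which prefix the message with the driver and device name automatically and drop the trailing period per current convention. A minimal before/after sketch drawn from the hunk above; dev is the driver's struct net_device:

	/* before */
	printk(KERN_INFO "%s: 21143 10baseT link beat good.\n", dev->name);

	/* after: &dev->dev is the struct device embedded in the net_device */
	dev_info(&dev->dev, "21143 10baseT link beat good\n");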
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig
index 1cc8cf4425d1..516713fa0a05 100644
--- a/drivers/net/tulip/Kconfig
+++ b/drivers/net/tulip/Kconfig
@@ -101,6 +101,10 @@ config TULIP_NAPI_HW_MITIGATION
If in doubt, say Y.
+config TULIP_DM910X
+ def_bool y
+ depends on TULIP && SPARC
+
config DE4X5
tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
depends on PCI || EISA
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index d4255d44cb75..29330209ad8b 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -337,7 +337,7 @@ static void de21041_media_timer (unsigned long data);
static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
-static struct pci_device_id de_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
@@ -382,9 +382,9 @@ static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
/* Ingore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
if (netif_msg_rx_err(de))
- printk(KERN_WARNING "%s: Oversized Ethernet frame "
- "spanned multiple buffers, status %8.8x!\n",
- de->dev->name, status);
+ dev_warn(&de->dev->dev,
+ "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
+ status);
de->net_stats.rx_length_errors++;
}
} else if (status & RxError) {
@@ -487,7 +487,7 @@ rx_next:
}
if (!rx_work)
- printk(KERN_WARNING "%s: rx work limit reached\n", de->dev->name);
+ dev_warn(&de->dev->dev, "rx work limit reached\n");
de->rx_tail = rx_tail;
}
@@ -504,7 +504,8 @@ static irqreturn_t de_interrupt (int irq, void *dev_instance)
if (netif_msg_intr(de))
printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
- dev->name, status, dr32(MacMode), de->rx_tail, de->tx_head, de->tx_tail);
+ dev->name, status, dr32(MacMode),
+ de->rx_tail, de->tx_head, de->tx_tail);
dw32(MacStatus, status);
@@ -529,8 +530,9 @@ static irqreturn_t de_interrupt (int irq, void *dev_instance)
pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
- printk(KERN_ERR "%s: PCI bus error, status=%08x, PCI status=%04x\n",
- dev->name, status, pci_status);
+ dev_err(&de->dev->dev,
+ "PCI bus error, status=%08x, PCI status=%04x\n",
+ status, pci_status);
}
return IRQ_HANDLED;
@@ -582,7 +584,8 @@ static void de_tx (struct de_private *de)
de->net_stats.tx_packets++;
de->net_stats.tx_bytes += skb->len;
if (netif_msg_tx_done(de))
- printk(KERN_DEBUG "%s: tx done, slot %d\n", de->dev->name, tx_tail);
+ printk(KERN_DEBUG "%s: tx done, slot %d\n",
+ de->dev->name, tx_tail);
}
dev_kfree_skb_irq(skb);
}
@@ -870,7 +873,7 @@ static void de_stop_rxtx (struct de_private *de)
udelay(100);
}
- printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name);
+ dev_warn(&de->dev->dev, "timeout expired stopping DMA\n");
}
static inline void de_start_rxtx (struct de_private *de)
@@ -905,8 +908,8 @@ static void de_link_up(struct de_private *de)
if (!netif_carrier_ok(de->dev)) {
netif_carrier_on(de->dev);
if (netif_msg_link(de))
- printk(KERN_INFO "%s: link up, media %s\n",
- de->dev->name, media_name[de->media_type]);
+ dev_info(&de->dev->dev, "link up, media %s\n",
+ media_name[de->media_type]);
}
}
@@ -915,7 +918,7 @@ static void de_link_down(struct de_private *de)
if (netif_carrier_ok(de->dev)) {
netif_carrier_off(de->dev);
if (netif_msg_link(de))
- printk(KERN_INFO "%s: link down\n", de->dev->name);
+ dev_info(&de->dev->dev, "link down\n");
}
}
@@ -925,7 +928,8 @@ static void de_set_media (struct de_private *de)
u32 macmode = dr32(MacMode);
if (de_is_running(de))
- printk(KERN_WARNING "%s: chip is running while changing media!\n", de->dev->name);
+ dev_warn(&de->dev->dev,
+ "chip is running while changing media!\n");
if (de->de21040)
dw32(CSR11, FULL_DUPLEX_MAGIC);
@@ -945,15 +949,15 @@ static void de_set_media (struct de_private *de)
macmode &= ~FullDuplex;
if (netif_msg_link(de)) {
- printk(KERN_INFO
- "%s: set link %s\n"
- "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n"
- "%s: set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
- de->dev->name, media_name[media],
- de->dev->name, dr32(MacMode), dr32(SIAStatus),
- dr32(CSR13), dr32(CSR14), dr32(CSR15),
- de->dev->name, macmode, de->media[media].csr13,
- de->media[media].csr14, de->media[media].csr15);
+ dev_info(&de->dev->dev, "set link %s\n", media_name[media]);
+ dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
+ dr32(MacMode), dr32(SIAStatus),
+ dr32(CSR13), dr32(CSR14), dr32(CSR15));
+
+ dev_info(&de->dev->dev,
+ "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
+ macmode, de->media[media].csr13,
+ de->media[media].csr14, de->media[media].csr15);
}
if (macmode != dr32(MacMode))
dw32(MacMode, macmode);
@@ -992,9 +996,8 @@ static void de21040_media_timer (unsigned long data)
de_link_up(de);
else
if (netif_msg_timer(de))
- printk(KERN_INFO "%s: %s link ok, status %x\n",
- dev->name, media_name[de->media_type],
- status);
+ dev_info(&dev->dev, "%s link ok, status %x\n",
+ media_name[de->media_type], status);
return;
}
@@ -1022,8 +1025,8 @@ no_link_yet:
add_timer(&de->media_timer);
if (netif_msg_timer(de))
- printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
- dev->name, media_name[de->media_type], status);
+ dev_info(&dev->dev, "no link, trying media %s, status %x\n",
+ media_name[de->media_type], status);
}
static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
@@ -1079,9 +1082,10 @@ static void de21041_media_timer (unsigned long data)
de_link_up(de);
else
if (netif_msg_timer(de))
- printk(KERN_INFO "%s: %s link ok, mode %x status %x\n",
- dev->name, media_name[de->media_type],
- dr32(MacMode), status);
+ dev_info(&dev->dev,
+ "%s link ok, mode %x status %x\n",
+ media_name[de->media_type],
+ dr32(MacMode), status);
return;
}
@@ -1150,8 +1154,8 @@ no_link_yet:
add_timer(&de->media_timer);
if (netif_msg_timer(de))
- printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
- dev->name, media_name[de->media_type], status);
+ dev_info(&dev->dev, "no link, trying media %s, status %x\n",
+ media_name[de->media_type], status);
}
static void de_media_interrupt (struct de_private *de, u32 status)
@@ -1378,8 +1382,7 @@ static int de_open (struct net_device *dev)
rc = de_alloc_rings(de);
if (rc) {
- printk(KERN_ERR "%s: ring allocation failure, err=%d\n",
- dev->name, rc);
+ dev_err(&dev->dev, "ring allocation failure, err=%d\n", rc);
return rc;
}
@@ -1387,15 +1390,14 @@ static int de_open (struct net_device *dev)
rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
if (rc) {
- printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n",
- dev->name, dev->irq, rc);
+ dev_err(&dev->dev, "IRQ %d request failure, err=%d\n",
+ dev->irq, rc);
goto err_out_free;
}
rc = de_init_hw(de);
if (rc) {
- printk(KERN_ERR "%s: h/w init failure, err=%d\n",
- dev->name, rc);
+ dev_err(&dev->dev, "h/w init failure, err=%d\n", rc);
goto err_out_free_irq;
}
@@ -1666,8 +1668,8 @@ static int de_nway_reset(struct net_device *dev)
status = dr32(SIAStatus);
dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
if (netif_msg_link(de))
- printk(KERN_INFO "%s: link nway restart, status %x,%x\n",
- de->dev->name, status, dr32(SIAStatus));
+ dev_info(&de->dev->dev, "link nway restart, status %x,%x\n",
+ status, dr32(SIAStatus));
return 0;
}
@@ -1711,7 +1713,7 @@ static void __devinit de21040_get_mac_address (struct de_private *de)
de->dev->dev_addr[i] = value;
udelay(1);
if (boguscnt <= 0)
- printk(KERN_WARNING PFX "timeout reading 21040 MAC address byte %u\n", i);
+ pr_warning(PFX "timeout reading 21040 MAC address byte %u\n", i);
}
}
@@ -1830,9 +1832,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
}
if (netif_msg_probe(de))
- printk(KERN_INFO "de%d: SROM leaf offset %u, default media %s\n",
- de->board_idx, ofs,
- media_name[de->media_type]);
+ pr_info("de%d: SROM leaf offset %u, default media %s\n",
+ de->board_idx, ofs, media_name[de->media_type]);
/* init SIA register values to defaults */
for (i = 0; i < DE_MAX_MEDIA; i++) {
@@ -1879,9 +1880,9 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
de->media[idx].type = idx;
if (netif_msg_probe(de))
- printk(KERN_INFO "de%d: media block #%u: %s",
- de->board_idx, i,
- media_name[de->media[idx].type]);
+ pr_info("de%d: media block #%u: %s",
+ de->board_idx, i,
+ media_name[de->media[idx].type]);
bufp += sizeof (ib->opts);
@@ -1893,13 +1894,13 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
sizeof(ib->csr15);
if (netif_msg_probe(de))
- printk(" (%x,%x,%x)\n",
- de->media[idx].csr13,
- de->media[idx].csr14,
- de->media[idx].csr15);
+ pr_cont(" (%x,%x,%x)\n",
+ de->media[idx].csr13,
+ de->media[idx].csr14,
+ de->media[idx].csr15);
} else if (netif_msg_probe(de))
- printk("\n");
+ pr_cont("\n");
if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
break;
@@ -2005,7 +2006,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
/* check for invalid IRQ value */
if (pdev->irq < 2) {
rc = -EIO;
- printk(KERN_ERR PFX "invalid irq (%d) for pci dev %s\n",
+ pr_err(PFX "invalid irq (%d) for pci dev %s\n",
pdev->irq, pci_name(pdev));
goto err_out_res;
}
@@ -2016,14 +2017,14 @@ static int __devinit de_init_one (struct pci_dev *pdev,
pciaddr = pci_resource_start(pdev, 1);
if (!pciaddr) {
rc = -EIO;
- printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
- pci_name(pdev));
+ pr_err(PFX "no MMIO resource for pci dev %s\n", pci_name(pdev));
goto err_out_res;
}
if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
rc = -EIO;
- printk(KERN_ERR PFX "MMIO resource (%llx) too small on pci dev %s\n",
- (unsigned long long)pci_resource_len(pdev, 1), pci_name(pdev));
+ pr_err(PFX "MMIO resource (%llx) too small on pci dev %s\n",
+ (unsigned long long)pci_resource_len(pdev, 1),
+ pci_name(pdev));
goto err_out_res;
}
@@ -2031,9 +2032,9 @@ static int __devinit de_init_one (struct pci_dev *pdev,
regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
if (!regs) {
rc = -EIO;
- printk(KERN_ERR PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
- (unsigned long long)pci_resource_len(pdev, 1),
- pciaddr, pci_name(pdev));
+ pr_err(PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
+ (unsigned long long)pci_resource_len(pdev, 1),
+ pciaddr, pci_name(pdev));
goto err_out_res;
}
dev->base_addr = (unsigned long) regs;
@@ -2044,8 +2045,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
/* make sure hardware is not running */
rc = de_reset_mac(de);
if (rc) {
- printk(KERN_ERR PFX "Cannot reset MAC, pci dev %s\n",
- pci_name(pdev));
+ pr_err(PFX "Cannot reset MAC, pci dev %s\n", pci_name(pdev));
goto err_out_iomap;
}
@@ -2065,12 +2065,11 @@ static int __devinit de_init_one (struct pci_dev *pdev,
goto err_out_iomap;
/* print info about board and interface just registered */
- printk (KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n",
- dev->name,
- de->de21040 ? "21040" : "21041",
- dev->base_addr,
- dev->dev_addr,
- dev->irq);
+ dev_info(&dev->dev, "%s at 0x%lx, %pM, IRQ %d\n",
+ de->de21040 ? "21040" : "21041",
+ dev->base_addr,
+ dev->dev_addr,
+ dev->irq);
pci_set_drvdata(pdev, dev);
@@ -2158,8 +2157,7 @@ static int de_resume (struct pci_dev *pdev)
if (!netif_running(dev))
goto out_attach;
if ((retval = pci_enable_device(pdev))) {
- printk (KERN_ERR "%s: pci_enable_device failed in resume\n",
- dev->name);
+ dev_err(&dev->dev, "pci_enable_device failed in resume\n");
goto out;
}
de_init_hw(de);
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index ad63621913c3..5fc61c1012e5 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -61,6 +61,8 @@
Test and make sure PCI latency is now correct for all cases.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "dmfe"
#define DRV_VERSION "1.36.4"
#define DRV_RELDATE "2002-01-17"
@@ -92,6 +94,10 @@
#include <asm/uaccess.h>
#include <asm/irq.h>
+#ifdef CONFIG_TULIP_DM910X
+#include <linux/of.h>
+#endif
+
/* Board/System/Debug information/definition ---------------- */
#define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */
@@ -145,16 +151,17 @@
#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */
#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */
-#define DMFE_DBUG(dbug_now, msg, value) \
- do { \
- if (dmfe_debug || (dbug_now)) \
- printk(KERN_ERR DRV_NAME ": %s %lx\n",\
- (msg), (long) (value)); \
+#define DMFE_DBUG(dbug_now, msg, value) \
+ do { \
+ if (dmfe_debug || (dbug_now)) \
+ pr_err("%s %lx\n", \
+ (msg), (long) (value)); \
} while (0)
-#define SHOW_MEDIA_TYPE(mode) \
- printk (KERN_INFO DRV_NAME ": Change Speed to %sMhz %s duplex\n" , \
- (mode & 1) ? "100":"10", (mode & 4) ? "full":"half");
+#define SHOW_MEDIA_TYPE(mode) \
+ pr_info("Change Speed to %sMhz %s duplex\n" , \
+ (mode & 1) ? "100":"10", \
+ (mode & 4) ? "full":"half");
/* CR9 definition: SROM/MII */
@@ -377,6 +384,22 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
if (!printed_version++)
printk(version);
+ /*
+ * SPARC on-board DM910x chips should be handled by the main
+ * tulip driver, except for early DM9100s.
+ */
+#ifdef CONFIG_TULIP_DM910X
+ if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
+ ent->driver_data == PCI_DM9102_ID) {
+ struct device_node *dp = pci_device_to_OF_node(pdev);
+
+ if (dp && of_get_property(dp, "local-mac-address", NULL)) {
+ pr_info("skipping on-board DM910x (use tulip)\n");
+ return -ENODEV;
+ }
+ }
+#endif
+
/* Init network device */
dev = alloc_etherdev(sizeof(*db));
if (dev == NULL)
@@ -384,8 +407,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- printk(KERN_WARNING DRV_NAME
- ": 32-bit PCI DMA not available.\n");
+ pr_warning("32-bit PCI DMA not available\n");
err = -ENODEV;
goto err_out_free;
}
@@ -396,13 +418,13 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
goto err_out_free;
if (!pci_resource_start(pdev, 0)) {
- printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
+ pr_err("I/O base is zero\n");
err = -ENODEV;
goto err_out_disable;
}
if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
- printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
+ pr_err("Allocated I/O size too small\n");
err = -ENODEV;
goto err_out_disable;
}
@@ -417,7 +439,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
#endif
if (pci_request_regions(pdev, DRV_NAME)) {
- printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
+ pr_err("Failed to request PCI regions\n");
err = -ENODEV;
goto err_out_disable;
}
@@ -476,12 +498,9 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
if (err)
goto err_out_free_buf;
- printk(KERN_INFO "%s: Davicom DM%04lx at pci%s, %pM, irq %d.\n",
- dev->name,
- ent->driver_data >> 16,
- pci_name(pdev),
- dev->dev_addr,
- dev->irq);
+ dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
+ ent->driver_data >> 16,
+ pci_name(pdev), dev->dev_addr, dev->irq);
pci_set_master(pdev);
@@ -675,7 +694,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
/* Too large packet check */
if (skb->len > MAX_PACKET_SIZE) {
- printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
+ pr_err("big packet = %d\n", (u16)skb->len);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -685,8 +704,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
/* No Tx resource check, it never happen nromally */
if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
spin_unlock_irqrestore(&db->lock, flags);
- printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n",
- db->tx_queue_cnt);
+ pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
return NETDEV_TX_BUSY;
}
@@ -758,12 +776,11 @@ static int dmfe_stop(struct DEVICE *dev)
#if 0
/* show statistic counter */
- printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx"
- " LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
- db->tx_fifo_underrun, db->tx_excessive_collision,
- db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
- db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
- db->reset_fatal, db->reset_TXtimeout);
+ printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
+ db->tx_fifo_underrun, db->tx_excessive_collision,
+ db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
+ db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
+ db->reset_fatal, db->reset_TXtimeout);
#endif
return 0;
@@ -864,7 +881,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
txptr = db->tx_remove_ptr;
while(db->tx_packet_cnt) {
tdes0 = le32_to_cpu(txptr->tdes0);
- /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
+ pr_debug("tdes0=%x\n", tdes0);
if (tdes0 & 0x80000000)
break;
@@ -874,7 +891,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
/* Transmit statistic counter */
if ( tdes0 != 0x7fffffff ) {
- /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
+ pr_debug("tdes0=%x\n", tdes0);
dev->stats.collisions += (tdes0 >> 3) & 0xf;
dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
if (tdes0 & TDES0_ERR_MASK) {
@@ -971,7 +988,7 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
/* error summary bit check */
if (rdes0 & 0x8000) {
/* This is a error packet */
- //printk(DRV_NAME ": rdes0: %lx\n", rdes0);
+ pr_debug("rdes0: %x\n", rdes0);
dev->stats.rx_errors++;
if (rdes0 & 1)
dev->stats.rx_fifo_errors++;
@@ -1170,8 +1187,7 @@ static void dmfe_timer(unsigned long data)
if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
db->reset_TXtimeout++;
db->wait_reset = 1;
- printk(KERN_WARNING "%s: Tx timeout - resetting\n",
- dev->name);
+ dev_warn(&dev->dev, "Tx timeout - resetting\n");
}
}
@@ -1625,7 +1641,7 @@ static u8 dmfe_sense_speed(struct dmfe_board_info * db)
else /* DM9102/DM9102A */
phy_mode = phy_read(db->ioaddr,
db->phy_addr, 17, db->chip_id) & 0xf000;
- /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
+ pr_debug("Phy_mode %x\n", phy_mode);
switch (phy_mode) {
case 0x1000: db->op_mode = DMFE_10MHF; break;
case 0x2000: db->op_mode = DMFE_10MFD; break;
@@ -2068,7 +2084,7 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
-static struct pci_device_id dmfe_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(dmfe_pci_tbl) = {
{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
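dmfe.c also gains a pr_fmt() definition, so the pr_err()/pr_info()/pr_debug() calls introduced above no longer repeat DRV_NAME by hand. A short sketch of the mechanism, assuming the standard printk.h behaviour; the function and message below are placeholders:

	/* Must be defined before any header that pulls in printk. */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>

	static void example(void)
	{
		/* prints e.g. "dmfe: big packet = 1600" */
		pr_err("big packet = %d\n", 1600);
	}

The CONFIG_TULIP_DM910X symbol guarding the SPARC on-board check comes from the new tulip/Kconfig entry earlier in this diff.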
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index 889f57aae89b..93f4e8309f81 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -161,15 +161,15 @@ void __devinit tulip_parse_eeprom(struct net_device *dev)
if (ee_data[0] == 0xff) {
if (last_mediatable) {
controller_index++;
- printk(KERN_INFO "%s: Controller %d of multiport board.\n",
- dev->name, controller_index);
+ dev_info(&dev->dev,
+ "Controller %d of multiport board\n",
+ controller_index);
tp->mtable = last_mediatable;
ee_data = last_ee_data;
goto subsequent_board;
} else
- printk(KERN_INFO "%s: Missing EEPROM, this interface may "
- "not work correctly!\n",
- dev->name);
+ dev_info(&dev->dev,
+ "Missing EEPROM, this interface may not work correctly!\n");
return;
}
/* Do a fix-up based on the vendor half of the station address prefix. */
@@ -181,16 +181,15 @@ void __devinit tulip_parse_eeprom(struct net_device *dev)
i++; /* An Accton EN1207, not an outlaw Maxtech. */
memcpy(ee_data + 26, eeprom_fixups[i].newtable,
sizeof(eeprom_fixups[i].newtable));
- printk(KERN_INFO "%s: Old format EEPROM on '%s' board. Using"
- " substitute media control info.\n",
- dev->name, eeprom_fixups[i].name);
+ dev_info(&dev->dev,
+ "Old format EEPROM on '%s' board. Using substitute media control info\n",
+ eeprom_fixups[i].name);
break;
}
}
if (eeprom_fixups[i].name == NULL) { /* No fixup found. */
- printk(KERN_INFO "%s: Old style EEPROM with no media selection "
- "information.\n",
- dev->name);
+ dev_info(&dev->dev,
+ "Old style EEPROM with no media selection information\n");
return;
}
}
@@ -218,7 +217,8 @@ subsequent_board:
/* there is no phy information, don't even try to build mtable */
if (count == 0) {
if (tulip_debug > 0)
- printk(KERN_WARNING "%s: no phy info, aborting mtable build\n", dev->name);
+ dev_warn(&dev->dev,
+ "no phy info, aborting mtable build\n");
return;
}
@@ -234,8 +234,8 @@ subsequent_board:
mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0;
mtable->csr15dir = mtable->csr15val = 0;
- printk(KERN_INFO "%s: EEPROM default media type %s.\n", dev->name,
- media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
+ dev_info(&dev->dev, "EEPROM default media type %s\n",
+ media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
for (i = 0; i < count; i++) {
struct medialeaf *leaf = &mtable->mleaf[i];
@@ -298,16 +298,17 @@ subsequent_board:
}
if (tulip_debug > 1 && leaf->media == 11) {
unsigned char *bp = leaf->leafdata;
- printk(KERN_INFO "%s: MII interface PHY %d, setup/reset "
- "sequences %d/%d long, capabilities %2.2x %2.2x.\n",
- dev->name, bp[0], bp[1], bp[2 + bp[1]*2],
- bp[5 + bp[2 + bp[1]*2]*2], bp[4 + bp[2 + bp[1]*2]*2]);
+ dev_info(&dev->dev,
+ "MII interface PHY %d, setup/reset sequences %d/%d long, capabilities %02x %02x\n",
+ bp[0], bp[1], bp[2 + bp[1]*2],
+ bp[5 + bp[2 + bp[1]*2]*2],
+ bp[4 + bp[2 + bp[1]*2]*2]);
}
- printk(KERN_INFO "%s: Index #%d - Media %s (#%d) described "
- "by a %s (%d) block.\n",
- dev->name, i, medianame[leaf->media & 15], leaf->media,
- leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>",
- leaf->type);
+ dev_info(&dev->dev,
+ "Index #%d - Media %s (#%d) described by a %s (%d) block\n",
+ i, medianame[leaf->media & 15], leaf->media,
+ leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>",
+ leaf->type);
}
if (new_advertise)
tp->sym_advertise = new_advertise;
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 2e8e8ee893c7..1faf7a4d7202 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -125,12 +125,12 @@ int tulip_poll(struct napi_struct *napi, int budget)
#endif
if (tulip_debug > 4)
- printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
- tp->rx_ring[entry].status);
+ printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
+ entry, tp->rx_ring[entry].status);
do {
if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
- printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
+ printk(KERN_DEBUG " In tulip_poll(), hardware disappeared\n");
break;
}
/* Acknowledge current RX interrupt sources. */
@@ -146,7 +146,7 @@ int tulip_poll(struct napi_struct *napi, int budget)
break;
if (tulip_debug > 5)
- printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
+ printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
dev->name, entry, status);
if (++work_done >= budget)
@@ -177,15 +177,15 @@ int tulip_poll(struct napi_struct *napi, int budget)
/* Ingore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: Oversized Ethernet frame "
- "spanned multiple buffers, status %8.8x!\n",
- dev->name, status);
+ dev_warn(&dev->dev,
+ "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
+ status);
tp->stats.rx_length_errors++;
}
} else {
/* There was a fatal error. */
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
+ printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
dev->name, status);
tp->stats.rx_errors++; /* end of a packet.*/
if (pkt_len > 1518 ||
@@ -226,12 +226,11 @@ int tulip_poll(struct napi_struct *napi, int budget)
#ifndef final_version
if (tp->rx_buffers[entry].mapping !=
le32_to_cpu(tp->rx_ring[entry].buffer1)) {
- printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
- "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
- dev->name,
- le32_to_cpu(tp->rx_ring[entry].buffer1),
- (unsigned long long)tp->rx_buffers[entry].mapping,
- skb->head, temp);
+ dev_err(&dev->dev,
+ "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
+ le32_to_cpu(tp->rx_ring[entry].buffer1),
+ (unsigned long long)tp->rx_buffers[entry].mapping,
+ skb->head, temp);
}
#endif
@@ -365,16 +364,16 @@ static int tulip_rx(struct net_device *dev)
int received = 0;
if (tulip_debug > 4)
- printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
- tp->rx_ring[entry].status);
+ printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
+ entry, tp->rx_ring[entry].status);
/* If we own the next entry, it is a new packet. Send it up. */
while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
s32 status = le32_to_cpu(tp->rx_ring[entry].status);
short pkt_len;
if (tulip_debug > 5)
- printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
- dev->name, entry, status);
+ printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
+ dev->name, entry, status);
if (--rx_work_limit < 0)
break;
@@ -402,16 +401,16 @@ static int tulip_rx(struct net_device *dev)
/* Ingore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: Oversized Ethernet frame "
- "spanned multiple buffers, status %8.8x!\n",
- dev->name, status);
+ dev_warn(&dev->dev,
+ "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
+ status);
tp->stats.rx_length_errors++;
}
} else {
/* There was a fatal error. */
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
- dev->name, status);
+ printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
+ dev->name, status);
tp->stats.rx_errors++; /* end of a packet.*/
if (pkt_len > 1518 ||
(status & RxDescRunt))
@@ -450,12 +449,11 @@ static int tulip_rx(struct net_device *dev)
#ifndef final_version
if (tp->rx_buffers[entry].mapping !=
le32_to_cpu(tp->rx_ring[entry].buffer1)) {
- printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
- "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
- dev->name,
- le32_to_cpu(tp->rx_ring[entry].buffer1),
- (long long)tp->rx_buffers[entry].mapping,
- skb->head, temp);
+ dev_err(&dev->dev,
+ "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %Lx %p / %p\n",
+ le32_to_cpu(tp->rx_ring[entry].buffer1),
+ (long long)tp->rx_buffers[entry].mapping,
+ skb->head, temp);
}
#endif
@@ -569,7 +567,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
#endif /* CONFIG_TULIP_NAPI */
if (tulip_debug > 4)
- printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
+ printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x\n",
dev->name, csr5, ioread32(ioaddr + CSR5));
@@ -601,8 +599,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
/* There was an major error, log it. */
#ifndef final_version
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
- dev->name, status);
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
+ dev->name, status);
#endif
tp->stats.tx_errors++;
if (status & 0x4104) tp->stats.tx_aborted_errors++;
@@ -631,8 +629,9 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
#ifndef final_version
if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
- printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
- dev->name, dirty_tx, tp->cur_tx);
+ dev_err(&dev->dev,
+ "Out-of-sync dirty pointer, %d vs. %d\n",
+ dirty_tx, tp->cur_tx);
dirty_tx += TX_RING_SIZE;
}
#endif
@@ -643,9 +642,10 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
tp->dirty_tx = dirty_tx;
if (csr5 & TxDied) {
if (tulip_debug > 2)
- printk(KERN_WARNING "%s: The transmitter stopped."
- " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
- dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
+ dev_warn(&dev->dev,
+ "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
+ csr5, ioread32(ioaddr + CSR6),
+ tp->csr6);
tulip_restart_rxtx(tp);
}
spin_unlock(&tp->lock);
@@ -696,8 +696,9 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
* to the 21142/3 docs that is).
* -- rmk
*/
- printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
- dev->name, tp->nir, error);
+ dev_err(&dev->dev,
+ "(%lu) System Error occurred (%d)\n",
+ tp->nir, error);
}
/* Clear all error sources, included undocumented ones! */
iowrite32(0x0800f7ba, ioaddr + CSR5);
@@ -706,16 +707,17 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
if (csr5 & TimerInt) {
if (tulip_debug > 2)
- printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
- dev->name, csr5);
+ dev_err(&dev->dev,
+ "Re-enabling interrupts, %08x\n",
+ csr5);
iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
tp->ttimer = 0;
oi++;
}
if (tx > maxtx || rx > maxrx || oi > maxoi) {
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: Too much work during an interrupt, "
- "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);
+ dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
+ csr5, tp->nir, tx, rx, oi);
/* Acknowledge all interrupt sources. */
iowrite32(0x8001ffff, ioaddr + CSR5);
@@ -764,14 +766,18 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
entry = tp->dirty_rx % RX_RING_SIZE;
if (tp->rx_buffers[entry].skb == NULL) {
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
+ dev_warn(&dev->dev,
+ "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
+ tp->nir, tp->cur_rx, tp->ttimer, rx);
if (tp->chip_id == LC82C168) {
iowrite32(0x00, ioaddr + CSR7);
mod_timer(&tp->timer, RUN_AT(HZ/50));
} else {
if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
+ dev_warn(&dev->dev,
+ "in rx suspend mode: (%lu) set timer\n",
+ tp->nir);
iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
ioaddr + CSR7);
iowrite32(TimerInt, ioaddr + CSR5);
@@ -787,8 +793,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
}
if (tulip_debug > 4)
- printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
- dev->name, ioread32(ioaddr + CSR5));
+ printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#04x\n",
+ dev->name, ioread32(ioaddr + CSR5));
return IRQ_HANDLED;
}
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index d8fda83705bf..68b170ae4d15 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -182,9 +182,8 @@ void tulip_select_media(struct net_device *dev, int startup)
switch (mleaf->type) {
case 0: /* 21140 non-MII xcvr. */
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver"
- " with control setting %2.2x.\n",
- dev->name, p[1]);
+ printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver with control setting %02x\n",
+ dev->name, p[1]);
dev->if_port = p[0];
if (startup)
iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12);
@@ -205,15 +204,15 @@ void tulip_select_media(struct net_device *dev, int startup)
struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
unsigned char *rst = rleaf->leafdata;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
- dev->name);
+ printk(KERN_DEBUG "%s: Resetting the transceiver\n",
+ dev->name);
for (i = 0; i < rst[0]; i++)
iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
}
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control "
- "%4.4x/%4.4x.\n",
- dev->name, medianame[dev->if_port], setup[0], setup[1]);
+ printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control %04x/%04x\n",
+ dev->name, medianame[dev->if_port],
+ setup[0], setup[1]);
if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. */
csr13val = setup[0];
csr14val = setup[1];
@@ -240,8 +239,8 @@ void tulip_select_media(struct net_device *dev, int startup)
if (startup) iowrite32(csr13val, ioaddr + CSR13);
}
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Setting CSR15 to %8.8x/%8.8x.\n",
- dev->name, csr15dir, csr15val);
+ printk(KERN_DEBUG "%s: Setting CSR15 to %08x/%08x\n",
+ dev->name, csr15dir, csr15val);
if (mleaf->type == 4)
new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18);
else
@@ -317,8 +316,9 @@ void tulip_select_media(struct net_device *dev, int startup)
if (tp->mii_advertise == 0)
tp->mii_advertise = tp->advertising[phy_num];
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Advertising %4.4x on MII %d.\n",
- dev->name, tp->mii_advertise, tp->phys[phy_num]);
+ printk(KERN_DEBUG "%s: Advertising %04x on MII %d\n",
+ dev->name, tp->mii_advertise,
+ tp->phys[phy_num]);
tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise);
}
break;
@@ -335,8 +335,8 @@ void tulip_select_media(struct net_device *dev, int startup)
struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
unsigned char *rst = rleaf->leafdata;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
- dev->name);
+ printk(KERN_DEBUG "%s: Resetting the transceiver\n",
+ dev->name);
for (i = 0; i < rst[0]; i++)
iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
}
@@ -344,20 +344,20 @@ void tulip_select_media(struct net_device *dev, int startup)
break;
}
default:
- printk(KERN_DEBUG "%s: Invalid media table selection %d.\n",
- dev->name, mleaf->type);
+ printk(KERN_DEBUG "%s: Invalid media table selection %d\n",
+ dev->name, mleaf->type);
new_csr6 = 0x020E0000;
}
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %2.2x.\n",
- dev->name, medianame[dev->if_port],
+ printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %02x\n",
+ dev->name, medianame[dev->if_port],
ioread32(ioaddr + CSR12) & 0xff);
} else if (tp->chip_id == LC82C168) {
if (startup && ! tp->medialock)
dev->if_port = tp->mii_cnt ? 11 : 0;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s.\n",
- dev->name, ioread32(ioaddr + 0xB8), medianame[dev->if_port]);
+ printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s\n",
+ dev->name, ioread32(ioaddr + 0xB8), medianame[dev->if_port]);
if (tp->mii_cnt) {
new_csr6 = 0x810C0000;
iowrite32(0x0001, ioaddr + CSR15);
@@ -388,10 +388,9 @@ void tulip_select_media(struct net_device *dev, int startup)
} else
new_csr6 = 0x03860000;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: No media description table, assuming "
- "%s transceiver, CSR12 %2.2x.\n",
- dev->name, medianame[dev->if_port],
- ioread32(ioaddr + CSR12));
+ printk(KERN_DEBUG "%s: No media description table, assuming %s transceiver, CSR12 %02x\n",
+ dev->name, medianame[dev->if_port],
+ ioread32(ioaddr + CSR12));
}
tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0);
@@ -415,16 +414,17 @@ int tulip_check_duplex(struct net_device *dev)
bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
lpa = tulip_mdio_read(dev, tp->phys[0], MII_LPA);
if (tulip_debug > 1)
- printk(KERN_INFO "%s: MII status %4.4x, Link partner report "
- "%4.4x.\n", dev->name, bmsr, lpa);
+ dev_info(&dev->dev, "MII status %04x, Link partner report %04x\n",
+ bmsr, lpa);
if (bmsr == 0xffff)
return -2;
if ((bmsr & BMSR_LSTATUS) == 0) {
int new_bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
if ((new_bmsr & BMSR_LSTATUS) == 0) {
if (tulip_debug > 1)
- printk(KERN_INFO "%s: No link beat on the MII interface,"
- " status %4.4x.\n", dev->name, new_bmsr);
+ dev_info(&dev->dev,
+ "No link beat on the MII interface, status %04x\n",
+ new_bmsr);
return -1;
}
}
@@ -443,10 +443,10 @@ int tulip_check_duplex(struct net_device *dev)
tulip_restart_rxtx(tp);
if (tulip_debug > 0)
- printk(KERN_INFO "%s: Setting %s-duplex based on MII"
- "#%d link partner capability of %4.4x.\n",
- dev->name, tp->full_duplex ? "full" : "half",
- tp->phys[0], lpa);
+ dev_info(&dev->dev,
+ "Setting %s-duplex based on MII#%d link partner capability of %04x\n",
+ tp->full_duplex ? "full" : "half",
+ tp->phys[0], lpa);
return 1;
}
@@ -501,15 +501,13 @@ void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
tp->phys[phy_idx++] = phy;
- printk (KERN_INFO "tulip%d: MII transceiver #%d "
- "config %4.4x status %4.4x advertising %4.4x.\n",
+ pr_info("tulip%d: MII transceiver #%d config %04x status %04x advertising %04x\n",
board_idx, phy, mii_reg0, mii_status, mii_advert);
/* Fixup for DLink with miswired PHY. */
if (mii_advert != to_advert) {
- printk (KERN_DEBUG "tulip%d: Advertising %4.4x on PHY %d,"
- " previously advertising %4.4x.\n",
- board_idx, to_advert, phy, mii_advert);
+ printk(KERN_DEBUG "tulip%d: Advertising %04x on PHY %d, previously advertising %04x\n",
+ board_idx, to_advert, phy, mii_advert);
tulip_mdio_write (dev, phy, 4, to_advert);
}
@@ -554,7 +552,7 @@ void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
}
tp->mii_cnt = phy_idx;
if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) {
- printk (KERN_INFO "tulip%d: ***WARNING***: No MII transceiver found!\n",
+ pr_info("tulip%d: ***WARNING***: No MII transceiver found!\n",
board_idx);
tp->phys[0] = 1;
}
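
Most of the remaining churn in media.c (and in the files below) is the specifier swap from %4.4x/%8.8x to %04x/%08x. For the unsigned register values printed here the two forms render identically — "width.precision" zero-pads via the precision, "0-flag width" zero-pads via the flag — so the change is purely cosmetic. A small userspace check, not taken from the patch:

	#include <stdio.h>

	int main(void)
	{
		unsigned int csr12 = 0x2f;

		printf("%4.4x\n", csr12);	/* prints 002f */
		printf("%04x\n", csr12);	/* prints 002f as well */
		return 0;
	}
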
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c
index d3253ed09dfc..966efa1a27d7 100644
--- a/drivers/net/tulip/pnic.c
+++ b/drivers/net/tulip/pnic.c
@@ -40,8 +40,8 @@ void pnic_do_nway(struct net_device *dev)
new_csr6 |= 0x00000200;
}
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC autonegotiated status %8.8x, %s.\n",
- dev->name, phy_reg, medianame[dev->if_port]);
+ printk(KERN_DEBUG "%s: PNIC autonegotiated status %08x, %s\n",
+ dev->name, phy_reg, medianame[dev->if_port]);
if (tp->csr6 != new_csr6) {
tp->csr6 = new_csr6;
/* Restart Tx */
@@ -58,8 +58,8 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
int phy_reg = ioread32(ioaddr + 0xB8);
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC link changed state %8.8x, CSR5 %8.8x.\n",
- dev->name, phy_reg, csr5);
+ printk(KERN_DEBUG "%s: PNIC link changed state %08x, CSR5 %08x\n",
+ dev->name, phy_reg, csr5);
if (ioread32(ioaddr + CSR5) & TPLnkFail) {
iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkFail) | TPLnkPass, ioaddr + CSR7);
/* If we use an external MII, then we mustn't use the
@@ -114,9 +114,8 @@ void pnic_timer(unsigned long data)
int csr5 = ioread32(ioaddr + CSR5);
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC timer PHY status %8.8x, %s "
- "CSR5 %8.8x.\n",
- dev->name, phy_reg, medianame[dev->if_port], csr5);
+ printk(KERN_DEBUG "%s: PNIC timer PHY status %08x, %s CSR5 %08x\n",
+ dev->name, phy_reg, medianame[dev->if_port], csr5);
if (phy_reg & 0x04000000) { /* Remote link fault */
iowrite32(0x0201F078, ioaddr + 0xB8);
next_tick = 1*HZ;
@@ -126,10 +125,11 @@ void pnic_timer(unsigned long data)
next_tick = 60*HZ;
} else if (csr5 & TPLnkFail) { /* 100baseTx link beat */
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %4.4x, "
- "CSR5 %8.8x, PHY %3.3x.\n",
- dev->name, medianame[dev->if_port], csr12,
- ioread32(ioaddr + CSR5), ioread32(ioaddr + 0xB8));
+ printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %04x, CSR5 %08x, PHY %03x\n",
+ dev->name, medianame[dev->if_port],
+ csr12,
+ ioread32(ioaddr + CSR5),
+ ioread32(ioaddr + 0xB8));
next_tick = 3*HZ;
if (tp->medialock) {
} else if (tp->nwayset && (dev->if_port & 1)) {
@@ -151,10 +151,11 @@ void pnic_timer(unsigned long data)
tulip_restart_rxtx(tp);
dev->trans_start = jiffies;
if (tulip_debug > 1)
- printk(KERN_INFO "%s: Changing PNIC configuration to %s "
- "%s-duplex, CSR6 %8.8x.\n",
- dev->name, medianame[dev->if_port],
- tp->full_duplex ? "full" : "half", new_csr6);
+ dev_info(&dev->dev,
+ "Changing PNIC configuration to %s %s-duplex, CSR6 %08x\n",
+ medianame[dev->if_port],
+ tp->full_duplex ? "full" : "half",
+ new_csr6);
}
}
}
@@ -162,7 +163,7 @@ too_good_connection:
mod_timer(&tp->timer, RUN_AT(next_tick));
if(!ioread32(ioaddr + CSR7)) {
if (tulip_debug > 1)
- printk(KERN_INFO "%s: sw timer wakeup.\n", dev->name);
+ dev_info(&dev->dev, "sw timer wakeup\n");
disable_irq(dev->irq);
tulip_refill_rx(dev);
enable_irq(dev->irq);
diff --git a/drivers/net/tulip/pnic2.c b/drivers/net/tulip/pnic2.c
index d8418694bf46..b8197666021e 100644
--- a/drivers/net/tulip/pnic2.c
+++ b/drivers/net/tulip/pnic2.c
@@ -87,8 +87,8 @@ void pnic2_timer(unsigned long data)
int next_tick = 60*HZ;
if (tulip_debug > 3)
- printk(KERN_INFO"%s: PNIC2 negotiation status %8.8x.\n",
- dev->name,ioread32(ioaddr + CSR12));
+ dev_info(&dev->dev, "PNIC2 negotiation status %08x\n",
+ ioread32(ioaddr + CSR12));
if (next_tick) {
mod_timer(&tp->timer, RUN_AT(next_tick));
@@ -125,8 +125,8 @@ void pnic2_start_nway(struct net_device *dev)
csr14 |= 0x00001184;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Restarting PNIC2 autonegotiation, "
- "csr14=%8.8x.\n", dev->name, csr14);
+ printk(KERN_DEBUG "%s: Restarting PNIC2 autonegotiation, csr14=%08x\n",
+ dev->name, csr14);
/* tell pnic2_lnk_change we are doing an nway negotiation */
dev->if_port = 0;
@@ -137,8 +137,8 @@ void pnic2_start_nway(struct net_device *dev)
tp->csr6 = ioread32(ioaddr + CSR6);
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: On Entry to Nway, "
- "csr6=%8.8x.\n", dev->name, tp->csr6);
+ printk(KERN_DEBUG "%s: On Entry to Nway, csr6=%08x\n",
+ dev->name, tp->csr6);
/* mask off any bits not to touch
* comment at top of file explains mask value
@@ -181,9 +181,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
int csr12 = ioread32(ioaddr + CSR12);
if (tulip_debug > 1)
- printk(KERN_INFO"%s: PNIC2 link status interrupt %8.8x, "
- " CSR5 %x, %8.8x.\n", dev->name, csr12,
- csr5, ioread32(ioaddr + CSR14));
+ dev_info(&dev->dev,
+ "PNIC2 link status interrupt %08x, CSR5 %x, %08x\n",
+ csr12, csr5, ioread32(ioaddr + CSR14));
/* If NWay finished and we have a negotiated partner capability.
* check bits 14:12 for bit pattern 101 - all is good
@@ -215,9 +215,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
else if (negotiated & 0x0020) dev->if_port = 0;
else {
if (tulip_debug > 1)
- printk(KERN_INFO "%s: funny autonegotiate result "
- "csr12 %8.8x advertising %4.4x\n",
- dev->name, csr12, tp->sym_advertise);
+ dev_info(&dev->dev,
+ "funny autonegotiate result csr12 %08x advertising %04x\n",
+ csr12, tp->sym_advertise);
tp->nwayset = 0;
/* so check if 100baseTx link state is okay */
if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180))
@@ -231,10 +231,11 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
if (tulip_debug > 1) {
if (tp->nwayset)
- printk(KERN_INFO "%s: Switching to %s based on link "
- "negotiation %4.4x & %4.4x = %4.4x.\n",
- dev->name, medianame[dev->if_port],
- tp->sym_advertise, tp->lpar, negotiated);
+ dev_info(&dev->dev,
+ "Switching to %s based on link negotiation %04x & %04x = %04x\n",
+ medianame[dev->if_port],
+ tp->sym_advertise, tp->lpar,
+ negotiated);
}
/* remember to turn off bit 7 - autonegotiate
@@ -270,9 +271,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
iowrite32(1, ioaddr + CSR13);
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 "
- "%8.8x.\n", dev->name, tp->csr6,
- ioread32(ioaddr + CSR6), ioread32(ioaddr + CSR12));
+ printk(KERN_DEBUG "%s: Setting CSR6 %08x/%x CSR12 %08x\n",
+ dev->name, tp->csr6,
+ ioread32(ioaddr + CSR6), ioread32(ioaddr + CSR12));
/* now the following actually writes out the
* new csr6 values
@@ -282,9 +283,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
return;
} else {
- printk(KERN_INFO "%s: Autonegotiation failed, "
- "using %s, link beat status %4.4x.\n",
- dev->name, medianame[dev->if_port], csr12);
+ dev_info(&dev->dev,
+ "Autonegotiation failed, using %s, link beat status %04x\n",
+ medianame[dev->if_port], csr12);
/* remember to turn off bit 7 - autonegotiate
* enable so we don't forget
@@ -339,9 +340,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
/* we are at 100mb and a potential link change occurred */
if (tulip_debug > 1)
- printk(KERN_INFO"%s: PNIC2 %s link beat %s.\n",
- dev->name, medianame[dev->if_port],
- (csr12 & 2) ? "failed" : "good");
+ dev_info(&dev->dev, "PNIC2 %s link beat %s\n",
+ medianame[dev->if_port],
+ (csr12 & 2) ? "failed" : "good");
/* check 100 link beat */
@@ -364,9 +365,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
/* we are at 10mb and a potential link change occurred */
if (tulip_debug > 1)
- printk(KERN_INFO"%s: PNIC2 %s link beat %s.\n",
- dev->name, medianame[dev->if_port],
- (csr12 & 4) ? "failed" : "good");
+ dev_info(&dev->dev, "PNIC2 %s link beat %s\n",
+ medianame[dev->if_port],
+ (csr12 & 4) ? "failed" : "good");
tp->nway = 0;
@@ -385,7 +386,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
if (tulip_debug > 1)
- printk(KERN_INFO"%s: PNIC2 Link Change Default?\n",dev->name);
+ dev_info(&dev->dev, "PNIC2 Link Change Default?\n");
/* if all else fails default to trying 10baseT-HD */
dev->if_port = 0;
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
index a0e084223082..36c2725ec886 100644
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/tulip/timer.c
@@ -28,11 +28,11 @@ void tulip_media_task(struct work_struct *work)
unsigned long flags;
if (tulip_debug > 2) {
- printk(KERN_DEBUG "%s: Media selection tick, %s, status %8.8x mode"
- " %8.8x SIA %8.8x %8.8x %8.8x %8.8x.\n",
- dev->name, medianame[dev->if_port], ioread32(ioaddr + CSR5),
- ioread32(ioaddr + CSR6), csr12, ioread32(ioaddr + CSR13),
- ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
+ printk(KERN_DEBUG "%s: Media selection tick, %s, status %08x mode %08x SIA %08x %08x %08x %08x\n",
+ dev->name, medianame[dev->if_port],
+ ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR6),
+ csr12, ioread32(ioaddr + CSR13),
+ ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
}
switch (tp->chip_id) {
case DC21140:
@@ -48,9 +48,9 @@ void tulip_media_task(struct work_struct *work)
Assume this a generic MII or SYM transceiver. */
next_tick = 60*HZ;
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: network media monitor CSR6 %8.8x "
- "CSR12 0x%2.2x.\n",
- dev->name, ioread32(ioaddr + CSR6), csr12 & 0xff);
+ printk(KERN_DEBUG "%s: network media monitor CSR6 %08x CSR12 0x%02x\n",
+ dev->name,
+ ioread32(ioaddr + CSR6), csr12 & 0xff);
break;
}
mleaf = &tp->mtable->mleaf[tp->cur_index];
@@ -62,9 +62,8 @@ void tulip_media_task(struct work_struct *work)
s8 bitnum = p[offset];
if (p[offset+1] & 0x80) {
if (tulip_debug > 1)
- printk(KERN_DEBUG"%s: Transceiver monitor tick "
- "CSR12=%#2.2x, no media sense.\n",
- dev->name, csr12);
+ printk(KERN_DEBUG "%s: Transceiver monitor tick CSR12=%#02x, no media sense\n",
+ dev->name, csr12);
if (mleaf->type == 4) {
if (mleaf->media == 3 && (csr12 & 0x02))
goto select_next_media;
@@ -72,16 +71,16 @@ void tulip_media_task(struct work_struct *work)
break;
}
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Transceiver monitor tick: CSR12=%#2.2x"
- " bit %d is %d, expecting %d.\n",
- dev->name, csr12, (bitnum >> 1) & 7,
- (csr12 & (1 << ((bitnum >> 1) & 7))) != 0,
- (bitnum >= 0));
+ printk(KERN_DEBUG "%s: Transceiver monitor tick: CSR12=%#02x bit %d is %d, expecting %d\n",
+ dev->name, csr12, (bitnum >> 1) & 7,
+ (csr12 & (1 << ((bitnum >> 1) & 7))) != 0,
+ (bitnum >= 0));
/* Check that the specified bit has the proper value. */
if ((bitnum < 0) !=
((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) {
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Link beat detected for %s.\n", dev->name,
+ printk(KERN_DEBUG "%s: Link beat detected for %s\n",
+ dev->name,
medianame[mleaf->media & MEDIA_MASK]);
if ((p[2] & 0x61) == 0x01) /* Bogus Znyx board. */
goto actually_mii;
@@ -100,9 +99,9 @@ void tulip_media_task(struct work_struct *work)
if (tulip_media_cap[dev->if_port] & MediaIsFD)
goto select_next_media; /* Skip FD entries. */
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: No link beat on media %s,"
- " trying transceiver type %s.\n",
- dev->name, medianame[mleaf->media & MEDIA_MASK],
+ printk(KERN_DEBUG "%s: No link beat on media %s, trying transceiver type %s\n",
+ dev->name,
+ medianame[mleaf->media & MEDIA_MASK],
medianame[tp->mtable->mleaf[tp->cur_index].media]);
tulip_select_media(dev, 0);
/* Restart the transmit process. */
@@ -151,8 +150,8 @@ void mxic_timer(unsigned long data)
int next_tick = 60*HZ;
if (tulip_debug > 3) {
- printk(KERN_INFO"%s: MXIC negotiation status %8.8x.\n", dev->name,
- ioread32(ioaddr + CSR12));
+ dev_info(&dev->dev, "MXIC negotiation status %08x\n",
+ ioread32(ioaddr + CSR12));
}
if (next_tick) {
mod_timer(&tp->timer, RUN_AT(next_tick));
@@ -167,11 +166,10 @@ void comet_timer(unsigned long data)
int next_tick = 60*HZ;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Comet link status %4.4x partner capability "
- "%4.4x.\n",
- dev->name,
- tulip_mdio_read(dev, tp->phys[0], 1),
- tulip_mdio_read(dev, tp->phys[0], 5));
+ printk(KERN_DEBUG "%s: Comet link status %04x partner capability %04x\n",
+ dev->name,
+ tulip_mdio_read(dev, tp->phys[0], 1),
+ tulip_mdio_read(dev, tp->phys[0], 5));
/* mod_timer synchronizes us with potential add_timer calls
* from interrupts.
*/
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 0fa3140d65bf..e1a5f03a49c5 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -41,7 +41,6 @@
static char version[] __devinitdata =
"Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
-
/* A few user-configurable values. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
@@ -196,9 +195,13 @@ struct tulip_chip_table tulip_tbl[] = {
| HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
/* DM910X */
+#ifdef CONFIG_TULIP_DM910X
{ "Davicom DM9102/DM9102A", 128, 0x0001ebef,
HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
tulip_timer, tulip_media_task },
+#else
+ { NULL },
+#endif
/* RS7112 */
{ "Conexant LANfinity", 256, 0x0001ebef,
@@ -207,7 +210,7 @@ struct tulip_chip_table tulip_tbl[] = {
};
-static struct pci_device_id tulip_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tulip_pci_tbl) = {
{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
@@ -228,8 +231,10 @@ static struct pci_device_id tulip_pci_tbl[] = {
{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
+#ifdef CONFIG_TULIP_DM910X
{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
+#endif
{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
@@ -243,6 +248,7 @@ static struct pci_device_id tulip_pci_tbl[] = {
{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
+ { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ } /* terminate list */
};
@@ -319,7 +325,8 @@ static void tulip_up(struct net_device *dev)
udelay(100);
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: tulip_up(), irq==%d.\n", dev->name, dev->irq);
+ printk(KERN_DEBUG "%s: tulip_up(), irq==%d\n",
+ dev->name, dev->irq);
iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
@@ -380,8 +387,9 @@ static void tulip_up(struct net_device *dev)
(dev->if_port == 12 ? 0 : dev->if_port);
for (i = 0; i < tp->mtable->leafcount; i++)
if (tp->mtable->mleaf[i].media == looking_for) {
- printk(KERN_INFO "%s: Using user-specified media %s.\n",
- dev->name, medianame[dev->if_port]);
+ dev_info(&dev->dev,
+ "Using user-specified media %s\n",
+ medianame[dev->if_port]);
goto media_picked;
}
}
@@ -389,8 +397,9 @@ static void tulip_up(struct net_device *dev)
int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
for (i = 0; i < tp->mtable->leafcount; i++)
if (tp->mtable->mleaf[i].media == looking_for) {
- printk(KERN_INFO "%s: Using EEPROM-set media %s.\n",
- dev->name, medianame[looking_for]);
+ dev_info(&dev->dev,
+ "Using EEPROM-set media %s\n",
+ medianame[looking_for]);
goto media_picked;
}
}
@@ -417,9 +426,10 @@ media_picked:
if (tp->mii_cnt) {
tulip_select_media(dev, 1);
if (tulip_debug > 1)
- printk(KERN_INFO "%s: Using MII transceiver %d, status "
- "%4.4x.\n",
- dev->name, tp->phys[0], tulip_mdio_read(dev, tp->phys[0], 1));
+ dev_info(&dev->dev,
+ "Using MII transceiver %d, status %04x\n",
+ tp->phys[0],
+ tulip_mdio_read(dev, tp->phys[0], 1));
iowrite32(csr6_mask_defstate, ioaddr + CSR6);
tp->csr6 = csr6_mask_hdcap;
dev->if_port = 11;
@@ -483,9 +493,10 @@ media_picked:
iowrite32(0, ioaddr + CSR2); /* Rx poll demand */
if (tulip_debug > 2) {
- printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
- dev->name, ioread32(ioaddr + CSR0), ioread32(ioaddr + CSR5),
- ioread32(ioaddr + CSR6));
+ printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
+ dev->name, ioread32(ioaddr + CSR0),
+ ioread32(ioaddr + CSR5),
+ ioread32(ioaddr + CSR6));
}
/* Set the timer to switch to check for link beat and perhaps switch
@@ -533,27 +544,30 @@ static void tulip_tx_timeout(struct net_device *dev)
if (tulip_media_cap[dev->if_port] & MediaIsMII) {
/* Do nothing -- the media monitor should handle this. */
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
- dev->name);
+ dev_warn(&dev->dev,
+ "Transmit timeout using MII device\n");
} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
tp->chip_id == DM910X) {
- printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, "
- "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
- dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
- ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
+ dev_warn(&dev->dev,
+ "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
+ ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
+ ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
+ ioread32(ioaddr + CSR15));
tp->timeout_recovery = 1;
schedule_work(&tp->media_work);
goto out_unlock;
} else if (tp->chip_id == PNIC2) {
- printk(KERN_WARNING "%s: PNIC2 transmit timed out, status %8.8x, "
- "CSR6/7 %8.8x / %8.8x CSR12 %8.8x, resetting...\n",
- dev->name, (int)ioread32(ioaddr + CSR5), (int)ioread32(ioaddr + CSR6),
- (int)ioread32(ioaddr + CSR7), (int)ioread32(ioaddr + CSR12));
+ dev_warn(&dev->dev,
+ "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
+ (int)ioread32(ioaddr + CSR5),
+ (int)ioread32(ioaddr + CSR6),
+ (int)ioread32(ioaddr + CSR7),
+ (int)ioread32(ioaddr + CSR12));
} else {
- printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, CSR12 "
- "%8.8x, resetting...\n",
- dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
+ dev_warn(&dev->dev,
+ "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
+ ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
dev->if_port = 0;
}
@@ -563,26 +577,26 @@ static void tulip_tx_timeout(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
int j;
- printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
- "%2.2x %2.2x %2.2x.\n",
- i, (unsigned int)tp->rx_ring[i].status,
- (unsigned int)tp->rx_ring[i].length,
- (unsigned int)tp->rx_ring[i].buffer1,
- (unsigned int)tp->rx_ring[i].buffer2,
- buf[0], buf[1], buf[2]);
+ printk(KERN_DEBUG
+ "%2d: %08x %08x %08x %08x %02x %02x %02x\n",
+ i,
+ (unsigned int)tp->rx_ring[i].status,
+ (unsigned int)tp->rx_ring[i].length,
+ (unsigned int)tp->rx_ring[i].buffer1,
+ (unsigned int)tp->rx_ring[i].buffer2,
+ buf[0], buf[1], buf[2]);
for (j = 0; buf[j] != 0xee && j < 1600; j++)
if (j < 100)
- printk(KERN_CONT " %2.2x", buf[j]);
- printk(KERN_CONT " j=%d.\n", j);
+ pr_cont(" %02x", buf[j]);
+ pr_cont(" j=%d\n", j);
}
- printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring);
+ printk(KERN_DEBUG " Rx ring %08x: ", (int)tp->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++)
- printk(KERN_CONT " %8.8x",
- (unsigned int)tp->rx_ring[i].status);
- printk(KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
+ pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
+ printk(KERN_DEBUG " Tx ring %08x: ", (int)tp->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(KERN_CONT " %8.8x", (unsigned int)tp->tx_ring[i].status);
- printk(KERN_CONT "\n");
+ pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
+ pr_cont("\n");
}
#endif
@@ -825,8 +839,9 @@ static int tulip_close (struct net_device *dev)
tulip_down (dev);
if (tulip_debug > 1)
- printk (KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
- dev->name, ioread32 (ioaddr + CSR5));
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "Shutting down ethercard, status was %02x\n",
+ ioread32 (ioaddr + CSR5));
free_irq (dev->irq, dev);
@@ -1066,10 +1081,10 @@ static void set_rx_mode(struct net_device *dev)
filterbit &= 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
if (tulip_debug > 2)
- printk(KERN_INFO "%s: Added filter for %pM"
- " %8.8x bit %d.\n",
- dev->name, mclist->dmi_addr,
- ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
+ dev_info(&dev->dev,
+ "Added filter for %pM %08x bit %d\n",
+ mclist->dmi_addr,
+ ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
}
if (mc_filter[0] == tp->mc_filter[0] &&
mc_filter[1] == tp->mc_filter[1])
@@ -1281,9 +1296,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
unsigned int force_csr0 = 0;
#ifndef MODULE
- static int did_version; /* Already printed version info. */
- if (tulip_debug > 0 && did_version++ == 0)
- printk (KERN_INFO "%s", version);
+ if (tulip_debug > 0)
+ printk_once(KERN_INFO "%s", version);
#endif
board_idx++;
@@ -1294,23 +1308,33 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
*/
if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
- printk (KERN_ERR PFX "skipping LMC card.\n");
+ pr_err(PFX "skipping LMC card\n");
return -ENODEV;
}
/*
- * Early DM9100's need software CRC and the DMFE driver
+ * DM910x chips should be handled by the dmfe driver, except
+ * on-board chips on SPARC systems. Also, early DM9100s need
+ * software CRC which only the dmfe driver supports.
*/
- if (pdev->vendor == 0x1282 && pdev->device == 0x9100)
- {
- /* Read Chip revision */
- if (pdev->revision < 0x30)
- {
- printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
+#ifdef CONFIG_TULIP_DM910X
+ if (chip_idx == DM910X) {
+ struct device_node *dp;
+
+ if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
+ pdev->revision < 0x30) {
+ pr_info(PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
+ return -ENODEV;
+ }
+
+ dp = pci_device_to_OF_node(pdev);
+ if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
+ pr_info(PFX "skipping DM910x expansion card (use dmfe)\n");
return -ENODEV;
}
}
+#endif
/*
* Looks for early PCI chipsets where people report hangs
@@ -1353,9 +1377,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
i = pci_enable_device(pdev);
if (i) {
- printk (KERN_ERR PFX
- "Cannot enable tulip board #%d, aborting\n",
- board_idx);
+ pr_err(PFX "Cannot enable tulip board #%d, aborting\n",
+ board_idx);
return i;
}
@@ -1364,22 +1387,22 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
/* alloc_etherdev ensures aligned and zeroed private structures */
dev = alloc_etherdev (sizeof (*tp));
if (!dev) {
- printk (KERN_ERR PFX "ether device alloc failed, aborting\n");
+ pr_err(PFX "ether device alloc failed, aborting\n");
return -ENOMEM;
}
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
- printk (KERN_ERR PFX "%s: I/O region (0x%llx@0x%llx) too small, "
- "aborting\n", pci_name(pdev),
- (unsigned long long)pci_resource_len (pdev, 0),
- (unsigned long long)pci_resource_start (pdev, 0));
+ pr_err(PFX "%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
+ pci_name(pdev),
+ (unsigned long long)pci_resource_len (pdev, 0),
+ (unsigned long long)pci_resource_start (pdev, 0));
goto err_out_free_netdev;
}
/* grab all resources from both PIO and MMIO regions, as we
* don't want anyone else messing around with our hardware */
- if (pci_request_regions (pdev, "tulip"))
+ if (pci_request_regions (pdev, DRV_NAME))
goto err_out_free_netdev;
ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
@@ -1592,8 +1615,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
if (dev->mem_start & MEDIA_MASK)
tp->default_port = dev->mem_start & MEDIA_MASK;
if (tp->default_port) {
- printk(KERN_INFO "tulip%d: Transceiver selection forced to %s.\n",
- board_idx, medianame[tp->default_port & MEDIA_MASK]);
+ pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
+ board_idx, medianame[tp->default_port & MEDIA_MASK]);
tp->medialock = 1;
if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
tp->full_duplex = 1;
@@ -1608,7 +1631,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
}
if (tp->flags & HAS_MEDIA_TABLE) {
- sprintf(dev->name, "tulip%d", board_idx); /* hack */
+ sprintf(dev->name, DRV_NAME "%d", board_idx); /* hack */
tulip_parse_eeprom(dev);
strcpy(dev->name, "eth%d"); /* un-hack */
}
@@ -1644,20 +1667,18 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
if (register_netdev(dev))
goto err_out_free_ring;
- printk(KERN_INFO "%s: %s rev %d at "
+ pci_set_drvdata(pdev, dev);
+
+ dev_info(&dev->dev,
#ifdef CONFIG_TULIP_MMIO
- "MMIO"
+ "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
#else
- "Port"
+ "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
#endif
- " %#llx,", dev->name, chip_name, pdev->revision,
- (unsigned long long) pci_resource_start(pdev, TULIP_BAR));
- pci_set_drvdata(pdev, dev);
-
- if (eeprom_missing)
- printk(" EEPROM not present,");
- printk(" %pM", dev->dev_addr);
- printk(", IRQ %d.\n", irq);
+ chip_name, pdev->revision,
+ (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
+ eeprom_missing ? " EEPROM not present," : "",
+ dev->dev_addr, irq);
if (tp->chip_id == PNIC2)
tp->link_change = pnic2_lnk_change;
@@ -1780,12 +1801,12 @@ static int tulip_resume(struct pci_dev *pdev)
return 0;
if ((retval = pci_enable_device(pdev))) {
- printk (KERN_ERR "tulip: pci_enable_device failed in resume\n");
+ pr_err(PFX "pci_enable_device failed in resume\n");
return retval;
}
if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
- printk (KERN_ERR "tulip: request_irq failed in resume\n");
+ pr_err(PFX "request_irq failed in resume\n");
return retval;
}
@@ -1855,7 +1876,7 @@ static struct pci_driver tulip_driver = {
static int __init tulip_init (void)
{
#ifdef MODULE
- printk (KERN_INFO "%s", version);
+ pr_info("%s", version);
#endif
/* copy module parms into globals */
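
The tulip_init_one() hunk above drops the open-coded did_version counter in favour of printk_once(), which keeps the "print the banner only on the first pass" behaviour behind a hidden static flag. A sketch of the equivalent open-coded idiom (not the exact kernel macro definition):

	#include <linux/kernel.h>
	#include <linux/types.h>

	/* Open-coded equivalent of printk_once(KERN_INFO "%s", version):
	 * a function-local static guard so the banner is emitted at most once. */
	static void print_version_banner(const char *version)
	{
		static bool printed;

		if (!printed) {
			printed = true;
			printk(KERN_INFO "%s", version);
		}
	}
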
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index fa019cabc355..dc3335d906f6 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -12,6 +12,8 @@
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "uli526x"
#define DRV_VERSION "0.9.3"
#define DRV_RELDATE "2005-7-29"
@@ -82,9 +84,16 @@
#define ULI526X_TX_TIMEOUT ((16*HZ)/2) /* tx packet time-out time 8 s" */
#define ULI526X_TX_KICK (4*HZ/2) /* tx packet Kick-out time 2 s" */
-#define ULI526X_DBUG(dbug_now, msg, value) if (uli526x_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value))
+#define ULI526X_DBUG(dbug_now, msg, value) \
+do { \
+ if (uli526x_debug || (dbug_now)) \
+ pr_err("%s %lx\n", (msg), (long) (value)); \
+} while (0)
-#define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMhz %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half");
+#define SHOW_MEDIA_TYPE(mode) \
+ pr_err("Change Speed to %sMhz %s duplex\n", \
+ mode & 1 ? "100" : "10", \
+ mode & 4 ? "full" : "half");
/* CR9 definition: SROM/MII */
@@ -284,7 +293,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n");
+ pr_warning("32-bit PCI DMA not available\n");
err = -ENODEV;
goto err_out_free;
}
@@ -295,19 +304,19 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
goto err_out_free;
if (!pci_resource_start(pdev, 0)) {
- printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
+ pr_err("I/O base is zero\n");
err = -ENODEV;
goto err_out_disable;
}
if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) {
- printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
+ pr_err("Allocated I/O size too small\n");
err = -ENODEV;
goto err_out_disable;
}
if (pci_request_regions(pdev, DRV_NAME)) {
- printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
+ pr_err("Failed to request PCI regions\n");
err = -ENODEV;
goto err_out_disable;
}
@@ -382,9 +391,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
if (err)
goto err_out_res;
- printk(KERN_INFO "%s: ULi M%04lx at pci%s, %pM, irq %d.\n",
- dev->name,ent->driver_data >> 16,pci_name(pdev),
- dev->dev_addr, dev->irq);
+ dev_info(&dev->dev, "ULi M%04lx at pci%s, %pM, irq %d\n",
+ ent->driver_data >> 16, pci_name(pdev),
+ dev->dev_addr, dev->irq);
pci_set_master(pdev);
@@ -516,7 +525,7 @@ static void uli526x_init(struct net_device *dev)
}
}
if(phy_tmp == 32)
- printk(KERN_WARNING "Can not find the phy address!!!");
+ pr_warning("Can not find the phy address!!!");
/* Parser SROM and media mode */
db->media_mode = uli526x_media_mode;
@@ -582,7 +591,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
/* Too large packet check */
if (skb->len > MAX_PACKET_SIZE) {
- printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
+ pr_err("big packet = %d\n", (u16)skb->len);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -592,7 +601,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
/* No Tx resource check, it never happen nromally */
if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
spin_unlock_irqrestore(&db->lock, flags);
- printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_packet_cnt);
+ pr_err("No Tx resource %ld\n", db->tx_packet_cnt);
return NETDEV_TX_BUSY;
}
@@ -1058,7 +1067,7 @@ static void uli526x_timer(unsigned long data)
/* Link Failed */
ULI526X_DBUG(0, "Link Failed", tmp_cr12);
netif_carrier_off(dev);
- printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name);
+ pr_info("%s NIC Link is Down\n",dev->name);
db->link_failed = 1;
/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
@@ -1090,11 +1099,11 @@ static void uli526x_timer(unsigned long data)
}
if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
{
- printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed);
+ pr_info("%s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed);
}
else
{
- printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed);
+ pr_info("%s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed);
}
netif_carrier_on(dev);
}
@@ -1104,7 +1113,7 @@ static void uli526x_timer(unsigned long data)
{
if(db->init==1)
{
- printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name);
+ pr_info("%s NIC Link is Down\n",dev->name);
netif_carrier_off(dev);
}
}
@@ -1230,8 +1239,7 @@ static int uli526x_resume(struct pci_dev *pdev)
err = pci_set_power_state(pdev, PCI_D0);
if (err) {
- printk(KERN_WARNING "%s: Could not put device into D0\n",
- dev->name);
+ dev_warn(&dev->dev, "Could not put device into D0\n");
return err;
}
@@ -1432,7 +1440,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
update_cr6(db->cr6_data, dev->base_addr);
dev->trans_start = jiffies;
} else
- printk(KERN_ERR DRV_NAME ": No Tx resource - Send_filter_frame!\n");
+ pr_err("No Tx resource - Send_filter_frame!\n");
}
@@ -1783,7 +1791,7 @@ static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
}
-static struct pci_device_id uli526x_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(uli526x_pci_tbl) = {
{ 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID },
{ 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID },
{ 0, }
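
Two idioms show up in the uli526x.c hunk above and recur in xircom_cb.c below: a pr_fmt() definition so every pr_err()/pr_info() call is automatically prefixed with the module name, and wrapping the multi-statement ULI526X_DBUG() macro in do { ... } while (0) so it expands to a single statement even after an unbraced if/else. A hedged sketch of both — MYDRV_DBUG and the message text are made up:

	/* Must be defined before the include that provides pr_err()/pr_info(). */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>

	static int debug;

	/* do { } while (0) makes the macro safe inside an unbraced if/else. */
	#define MYDRV_DBUG(force, msg, value)				\
	do {								\
		if (debug || (force))					\
			pr_err("%s %lx\n", (msg), (long)(value));	\
	} while (0)

	static void example(int err)
	{
		if (err)
			MYDRV_DBUG(1, "init failed", err);  /* one statement */
		else
			pr_info("probe ok\n");  /* prefixed with the module name */
	}
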
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 869a7a0005f9..9fb89afccf7c 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -218,7 +218,7 @@ enum chip_capability_flags {
CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
};
-static const struct pci_device_id w840_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(w840_pci_tbl) = {
{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
@@ -376,8 +376,8 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
irq = pdev->irq;
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- printk(KERN_WARNING "Winbond-840: Device %s disabled due to DMA limitations.\n",
- pci_name(pdev));
+ pr_warning("Winbond-840: Device %s disabled due to DMA limitations\n",
+ pci_name(pdev));
return -EIO;
}
dev = alloc_etherdev(sizeof(*np));
@@ -422,8 +422,9 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
if (option & 0x200)
np->mii_if.full_duplex = 1;
if (option & 15)
- printk(KERN_INFO "%s: ignoring user supplied media type %d",
- dev->name, option & 15);
+ dev_info(&dev->dev,
+ "ignoring user supplied media type %d",
+ option & 15);
}
if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
np->mii_if.full_duplex = 1;
@@ -440,9 +441,8 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
if (i)
goto err_out_cleardev;
- printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
- dev->name, pci_id_tbl[chip_idx].name, ioaddr,
- dev->dev_addr, irq);
+ dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
+ pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);
if (np->drv_flags & CanHaveMII) {
int phy, phy_idx = 0;
@@ -453,16 +453,17 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
mdio_read(dev, phy, MII_PHYSID2);
- printk(KERN_INFO "%s: MII PHY %8.8xh found at address %d, status "
- "0x%4.4x advertising %4.4x.\n",
- dev->name, np->mii, phy, mii_status, np->mii_if.advertising);
+ dev_info(&dev->dev,
+ "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
+ np->mii, phy, mii_status,
+ np->mii_if.advertising);
}
}
np->mii_cnt = phy_idx;
np->mii_if.phy_id = np->phys[0];
if (phy_idx == 0) {
- printk(KERN_WARNING "%s: MII PHY not found -- this device may "
- "not operate correctly.\n", dev->name);
+ dev_warn(&dev->dev,
+ "MII PHY not found -- this device may not operate correctly\n");
}
}
@@ -644,8 +645,8 @@ static int netdev_open(struct net_device *dev)
goto out_err;
if (debug > 1)
- printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n",
- dev->name, dev->irq);
+ printk(KERN_DEBUG "%s: w89c840_open() irq %d\n",
+ dev->name, dev->irq);
if((i=alloc_ringdesc(dev)))
goto out_err;
@@ -657,7 +658,7 @@ static int netdev_open(struct net_device *dev)
netif_start_queue(dev);
if (debug > 2)
- printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
+ printk(KERN_DEBUG "%s: Done netdev_open()\n", dev->name);
/* Set the timer to check for link beat. */
init_timer(&np->timer);
@@ -688,16 +689,18 @@ static int update_link(struct net_device *dev)
if (!(mii_reg & 0x4)) {
if (netif_carrier_ok(dev)) {
if (debug)
- printk(KERN_INFO "%s: MII #%d reports no link. Disabling watchdog.\n",
- dev->name, np->phys[0]);
+ dev_info(&dev->dev,
+ "MII #%d reports no link. Disabling watchdog\n",
+ np->phys[0]);
netif_carrier_off(dev);
}
return np->csr6;
}
if (!netif_carrier_ok(dev)) {
if (debug)
- printk(KERN_INFO "%s: MII #%d link is back. Enabling watchdog.\n",
- dev->name, np->phys[0]);
+ dev_info(&dev->dev,
+ "MII #%d link is back. Enabling watchdog\n",
+ np->phys[0]);
netif_carrier_on(dev);
}
@@ -729,9 +732,10 @@ static int update_link(struct net_device *dev)
if (fasteth)
result |= 0x20000000;
if (result != np->csr6 && debug)
- printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
- dev->name, fasteth ? 100 : 10,
- duplex ? "full" : "half", np->phys[0]);
+ dev_info(&dev->dev,
+ "Setting %dMBit-%s-duplex based on MII#%d\n",
+ fasteth ? 100 : 10, duplex ? "full" : "half",
+ np->phys[0]);
return result;
}
@@ -763,8 +767,8 @@ static inline void update_csr6(struct net_device *dev, int new)
limit--;
if(!limit) {
- printk(KERN_INFO "%s: couldn't stop rxtx, IntrStatus %xh.\n",
- dev->name, csr5);
+ dev_info(&dev->dev,
+ "couldn't stop rxtx, IntrStatus %xh\n", csr5);
break;
}
udelay(1);
@@ -783,10 +787,9 @@ static void netdev_timer(unsigned long data)
void __iomem *ioaddr = np->base_addr;
if (debug > 2)
- printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
- "config %8.8x.\n",
- dev->name, ioread32(ioaddr + IntrStatus),
- ioread32(ioaddr + NetworkConfig));
+ printk(KERN_DEBUG "%s: Media selection timer tick, status %08x config %08x\n",
+ dev->name, ioread32(ioaddr + IntrStatus),
+ ioread32(ioaddr + NetworkConfig));
spin_lock_irq(&np->lock);
update_csr6(dev, update_link(dev));
spin_unlock_irq(&np->lock);
@@ -899,8 +902,8 @@ static void init_registers(struct net_device *dev)
/* When not a module we can work around broken '486 PCI boards. */
if (boot_cpu_data.x86 <= 4) {
i |= 0x4800;
- printk(KERN_INFO "%s: This is a 386/486 PCI system, setting cache "
- "alignment to 8 longwords.\n", dev->name);
+ dev_info(&dev->dev,
+ "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
} else {
i |= 0xE000;
}
@@ -931,22 +934,23 @@ static void tx_timeout(struct net_device *dev)
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base_addr;
- printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
- " resetting...\n", dev->name, ioread32(ioaddr + IntrStatus));
+ dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
+ ioread32(ioaddr + IntrStatus));
{
int i;
printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++)
- printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
- printk(KERN_DEBUG" Tx ring %p: ", np->tx_ring);
+ printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
+ printk(KERN_CONT "\n");
+ printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(" %8.8x", np->tx_ring[i].status);
- printk("\n");
+ printk(KERN_CONT " %08x", np->tx_ring[i].status);
+ printk(KERN_CONT "\n");
}
- printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n",
- np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
- printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",ioread32(ioaddr+0x4C));
+ printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
+ np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
+ printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));
disable_irq(dev->irq);
spin_lock_irq(&np->lock);
@@ -1055,8 +1059,8 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
if (debug > 4) {
- printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
- dev->name, np->cur_tx, entry);
+ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d\n",
+ dev->name, np->cur_tx, entry);
}
return NETDEV_TX_OK;
}
@@ -1073,8 +1077,8 @@ static void netdev_tx_done(struct net_device *dev)
if (tx_status & 0x8000) { /* There was an error, log it. */
#ifndef final_version
if (debug > 1)
- printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
- dev->name, tx_status);
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
+ dev->name, tx_status);
#endif
np->stats.tx_errors++;
if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
@@ -1086,8 +1090,8 @@ static void netdev_tx_done(struct net_device *dev)
} else {
#ifndef final_version
if (debug > 3)
- printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %8.8x.\n",
- dev->name, entry, tx_status);
+ printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %08x\n",
+ dev->name, entry, tx_status);
#endif
np->stats.tx_bytes += np->tx_skbuff[entry]->len;
np->stats.collisions += (tx_status >> 3) & 15;
@@ -1130,8 +1134,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);
if (debug > 4)
- printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
- dev->name, intr_status);
+ printk(KERN_DEBUG "%s: Interrupt, status %04x\n",
+ dev->name, intr_status);
if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
break;
@@ -1156,8 +1160,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
netdev_error(dev, intr_status);
if (--work_limit < 0) {
- printk(KERN_WARNING "%s: Too much work at interrupt, "
- "status=0x%4.4x.\n", dev->name, intr_status);
+ dev_warn(&dev->dev,
+ "Too much work at interrupt, status=0x%04x\n",
+ intr_status);
/* Set the timer to re-enable the other interrupts after
10*82usec ticks. */
spin_lock(&np->lock);
@@ -1171,8 +1176,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
} while (1);
if (debug > 3)
- printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
- dev->name, ioread32(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x\n",
+ dev->name, ioread32(ioaddr + IntrStatus));
return IRQ_RETVAL(handled);
}
@@ -1185,8 +1190,8 @@ static int netdev_rx(struct net_device *dev)
int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
if (debug > 4) {
- printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
- entry, np->rx_ring[entry].status);
+ printk(KERN_DEBUG " In netdev_rx(), entry %d status %04x\n",
+ entry, np->rx_ring[entry].status);
}
/* If EOP is set on the next entry, it's a new packet. Send it up. */
@@ -1195,24 +1200,24 @@ static int netdev_rx(struct net_device *dev)
s32 status = desc->status;
if (debug > 4)
- printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
- status);
+ printk(KERN_DEBUG " netdev_rx() status was %08x\n",
+ status);
if (status < 0)
break;
if ((status & 0x38008300) != 0x0300) {
if ((status & 0x38000300) != 0x0300) {
/* Ingore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
- printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
- "multiple buffers, entry %#x status %4.4x!\n",
- dev->name, np->cur_rx, status);
+ dev_warn(&dev->dev,
+ "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
+ np->cur_rx, status);
np->stats.rx_length_errors++;
}
} else if (status & 0x8000) {
/* There was a fatal error. */
if (debug > 2)
- printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
- dev->name, status);
+ printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
+ dev->name, status);
np->stats.rx_errors++; /* end of a packet.*/
if (status & 0x0890) np->stats.rx_length_errors++;
if (status & 0x004C) np->stats.rx_frame_errors++;
@@ -1225,8 +1230,8 @@ static int netdev_rx(struct net_device *dev)
#ifndef final_version
if (debug > 4)
- printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
- " status %x.\n", pkt_len, status);
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d status %x\n",
+ pkt_len, status);
#endif
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
@@ -1251,11 +1256,10 @@ static int netdev_rx(struct net_device *dev)
#ifndef final_version /* Remove after testing. */
/* You will want this info for the initial debug. */
if (debug > 5)
- printk(KERN_DEBUG " Rx data %pM %pM"
- " %2.2x%2.2x %d.%d.%d.%d.\n",
+ printk(KERN_DEBUG " Rx data %pM %pM %02x%02x %pI4\n",
&skb->data[0], &skb->data[6],
skb->data[12], skb->data[13],
- skb->data[14], skb->data[15], skb->data[16], skb->data[17]);
+ &skb->data[14]);
#endif
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
@@ -1293,8 +1297,8 @@ static void netdev_error(struct net_device *dev, int intr_status)
void __iomem *ioaddr = np->base_addr;
if (debug > 2)
- printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
- dev->name, intr_status);
+ printk(KERN_DEBUG "%s: Abnormal event, %08x\n",
+ dev->name, intr_status);
if (intr_status == 0xffffffff)
return;
spin_lock(&np->lock);
@@ -1314,8 +1318,8 @@ static void netdev_error(struct net_device *dev, int intr_status)
new = 127; /* load full packet before starting */
new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
#endif
- printk(KERN_DEBUG "%s: Tx underflow, new csr6 %8.8x.\n",
- dev->name, new);
+ printk(KERN_DEBUG "%s: Tx underflow, new csr6 %08x\n",
+ dev->name, new);
update_csr6(dev, new);
}
if (intr_status & RxDied) { /* Missed a Rx frame. */
@@ -1487,11 +1491,13 @@ static int netdev_close(struct net_device *dev)
netif_stop_queue(dev);
if (debug > 1) {
- printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x "
- "Config %8.8x.\n", dev->name, ioread32(ioaddr + IntrStatus),
- ioread32(ioaddr + NetworkConfig));
- printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
- dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %08x Config %08x\n",
+ dev->name, ioread32(ioaddr + IntrStatus),
+ ioread32(ioaddr + NetworkConfig));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d\n",
+ dev->name,
+ np->cur_tx, np->dirty_tx,
+ np->cur_rx, np->dirty_rx);
}
/* Stop the chip's Tx and Rx processes. */
@@ -1512,18 +1518,16 @@ static int netdev_close(struct net_device *dev)
if (debug > 2) {
int i;
- printk(KERN_DEBUG" Tx ring at %8.8x:\n",
- (int)np->tx_ring);
+ printk(KERN_DEBUG" Tx ring at %08x:\n", (int)np->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x.\n",
- i, np->tx_ring[i].length,
- np->tx_ring[i].status, np->tx_ring[i].buffer1);
- printk(KERN_DEBUG " Rx ring %8.8x:\n",
- (int)np->rx_ring);
+ printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
+ i, np->tx_ring[i].length,
+ np->tx_ring[i].status, np->tx_ring[i].buffer1);
+ printk(KERN_DEBUG " Rx ring %08x:\n", (int)np->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++) {
- printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
- i, np->rx_ring[i].length,
- np->rx_ring[i].status, np->rx_ring[i].buffer1);
+ printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
+ i, np->rx_ring[i].length,
+ np->rx_ring[i].status, np->rx_ring[i].buffer1);
}
}
#endif /* __i386__ debugging only */
@@ -1622,9 +1626,8 @@ static int w840_resume (struct pci_dev *pdev)
goto out; /* device not suspended */
if (netif_running(dev)) {
if ((retval = pci_enable_device(pdev))) {
- printk (KERN_ERR
- "%s: pci_enable_device failed in resume\n",
- dev->name);
+ dev_err(&dev->dev,
+ "pci_enable_device failed in resume\n");
goto out;
}
spin_lock_irq(&np->lock);
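
The tx_timeout() hunk above also repairs the ring dumps: continuation fragments now carry KERN_CONT/pr_cont and each dumped ring is terminated with its own newline, so the values stay on one line instead of being split into separate default-level messages. The pattern, sketched with invented names:

	#include <linux/kernel.h>

	#define RING_SIZE 4

	/* Illustrative only: dump one line of ring statuses.  The first printk
	 * sets the level and leading text; pr_cont() appends to that line. */
	static void dump_ring(const char *name, const unsigned int *status)
	{
		int i;

		printk(KERN_DEBUG " %s ring:", name);
		for (i = 0; i < RING_SIZE; i++)
			pr_cont(" %08x", status[i]);
		pr_cont("\n");
	}
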
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 9924c4c7e2d6..acfeeb980562 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -14,6 +14,8 @@
* $Id: xircom_cb.c,v 1.33 2001/03/19 14:02:07 arjanv Exp $
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -144,7 +146,7 @@ static int link_status(struct xircom_private *card);
-static struct pci_device_id xircom_pci_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = {
{0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,},
{0,},
};
@@ -234,7 +236,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
pci_write_config_word (pdev, PCI_STATUS,tmp16);
if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) {
- printk(KERN_ERR "xircom_probe: failed to allocate io-region\n");
+ pr_err("%s: failed to allocate io-region\n", __func__);
return -ENODEV;
}
@@ -245,7 +247,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
*/
dev = alloc_etherdev(sizeof(struct xircom_private));
if (!dev) {
- printk(KERN_ERR "xircom_probe: failed to allocate etherdev\n");
+ pr_err("%s: failed to allocate etherdev\n", __func__);
goto device_fail;
}
private = netdev_priv(dev);
@@ -253,12 +255,12 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
/* Allocate the send/receive buffers */
private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle);
if (private->rx_buffer == NULL) {
- printk(KERN_ERR "xircom_probe: no memory for rx buffer \n");
+ pr_err("%s: no memory for rx buffer\n", __func__);
goto rx_buf_fail;
}
private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle);
if (private->tx_buffer == NULL) {
- printk(KERN_ERR "xircom_probe: no memory for tx buffer \n");
+ pr_err("%s: no memory for tx buffer\n", __func__);
goto tx_buf_fail;
}
@@ -281,11 +283,12 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
pci_set_drvdata(pdev, dev);
if (register_netdev(dev)) {
- printk(KERN_ERR "xircom_probe: netdevice registration failed.\n");
+ pr_err("%s: netdevice registration failed\n", __func__);
goto reg_fail;
}
- printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, pdev->revision, pdev->irq);
+ dev_info(&dev->dev, "Xircom cardbus revision %i at irq %i\n",
+ pdev->revision, pdev->irq);
/* start the transmitter to get a heartbeat */
/* TODO: send 2 dummy packets here */
transceiver_voodoo(private);
@@ -347,8 +350,10 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
#ifdef DEBUG
print_binary(status);
- printk("tx status 0x%08x 0x%08x \n",card->tx_buffer[0],card->tx_buffer[4]);
- printk("rx status 0x%08x 0x%08x \n",card->rx_buffer[0],card->rx_buffer[4]);
+ printk("tx status 0x%08x 0x%08x \n",
+ card->tx_buffer[0], card->tx_buffer[4]);
+ printk("rx status 0x%08x 0x%08x \n",
+ card->rx_buffer[0], card->rx_buffer[4]);
#endif
/* Handle shared irq and hotplug */
if (status == 0 || status == 0xffffffff) {
@@ -358,9 +363,9 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
if (link_status_changed(card)) {
int newlink;
- printk(KERN_DEBUG "xircom_cb: Link status has changed \n");
+ printk(KERN_DEBUG "xircom_cb: Link status has changed\n");
newlink = link_status(card);
- printk(KERN_INFO "xircom_cb: Link is %i mbit \n",newlink);
+ dev_info(&dev->dev, "Link is %i mbit\n", newlink);
if (newlink)
netif_carrier_on(dev);
else
@@ -457,7 +462,8 @@ static int xircom_open(struct net_device *dev)
struct xircom_private *xp = netdev_priv(dev);
int retval;
enter("xircom_open");
- printk(KERN_INFO "xircom cardbus adaptor found, registering as %s, using irq %i \n",dev->name,dev->irq);
+ pr_info("xircom cardbus adaptor found, registering as %s, using irq %i \n",
+ dev->name, dev->irq);
retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
if (retval) {
leave("xircom_open - No IRQ");
@@ -770,7 +776,7 @@ static void activate_receiver(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Receiver failed to deactivate\n");
+ pr_err("Receiver failed to deactivate\n");
}
/* enable the receiver */
@@ -787,7 +793,7 @@ static void activate_receiver(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Receiver failed to re-activate\n");
+ pr_err("Receiver failed to re-activate\n");
}
leave("activate_receiver");
@@ -818,7 +824,7 @@ static void deactivate_receiver(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Receiver failed to deactivate\n");
+ pr_err("Receiver failed to deactivate\n");
}
@@ -861,7 +867,7 @@ static void activate_transmitter(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Transmitter failed to deactivate\n");
+ pr_err("Transmitter failed to deactivate\n");
}
/* enable the transmitter */
@@ -878,7 +884,7 @@ static void activate_transmitter(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Transmitter failed to re-activate\n");
+ pr_err("Transmitter failed to re-activate\n");
}
leave("activate_transmitter");
@@ -909,7 +915,7 @@ static void deactivate_transmitter(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Transmitter failed to deactivate\n");
+ pr_err("Transmitter failed to deactivate\n");
}
@@ -1184,7 +1190,7 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
struct sk_buff *skb;
if (pkt_len > 1518) {
- printk(KERN_ERR "xircom_cb: Packet length %i is bogus \n",pkt_len);
+ pr_err("Packet length %i is bogus\n", pkt_len);
pkt_len = 1518;
}
@@ -1222,7 +1228,7 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p
status = le32_to_cpu(card->tx_buffer[4*descnr]);
#if 0
if (status & 0x8000) { /* Major error */
- printk(KERN_ERR "Major transmit error status %x \n", status);
+ pr_err("Major transmit error status %x\n", status);
card->tx_buffer[4*descnr] = 0;
netif_wake_queue (dev);
}
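
The pr_err()/pr_info() conversions above drop the literal "xircom_cb: " prefix from the messages; this only works if the driver defines pr_fmt() before its first include, so that every pr_<level>() call prepends the module name automatically. A minimal sketch of that convention (assumed here, not visible in this hunk):

/* must precede the first #include so pr_err()/pr_info() pick it up */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

/* pr_err("Receiver failed to deactivate\n") then logs:
 *   "xircom_cb: Receiver failed to deactivate"
 */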
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 01e99f22210e..5adb3d150552 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -144,6 +144,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
err = 0;
tfile->tun = tun;
tun->tfile = tfile;
+ tun->socket.file = file;
dev_hold(tun->dev);
sock_hold(tun->socket.sk);
atomic_inc(&tfile->count);
@@ -158,6 +159,7 @@ static void __tun_detach(struct tun_struct *tun)
/* Detach from net device */
netif_tx_lock_bh(tun->dev);
tun->tfile = NULL;
+ tun->socket.file = NULL;
netif_tx_unlock_bh(tun->dev);
/* Drop read queue */
@@ -387,7 +389,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Notify and wake up reader process */
if (tun->flags & TUN_FASYNC)
kill_fasync(&tun->fasync, SIGIO, POLL_IN);
- wake_up_interruptible(&tun->socket.wait);
+ wake_up_interruptible_poll(&tun->socket.wait, POLLIN |
+ POLLRDNORM | POLLRDBAND);
return NETDEV_TX_OK;
drop:
@@ -743,7 +746,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
len = min_t(int, skb->len, len);
skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
- total += len;
+ total += skb->len;
tun->dev->stats.tx_packets++;
tun->dev->stats.tx_bytes += len;
@@ -751,34 +754,23 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
return total;
}
-static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
- unsigned long count, loff_t pos)
+static ssize_t tun_do_read(struct tun_struct *tun,
+ struct kiocb *iocb, const struct iovec *iv,
+ ssize_t len, int noblock)
{
- struct file *file = iocb->ki_filp;
- struct tun_file *tfile = file->private_data;
- struct tun_struct *tun = __tun_get(tfile);
DECLARE_WAITQUEUE(wait, current);
struct sk_buff *skb;
- ssize_t len, ret = 0;
-
- if (!tun)
- return -EBADFD;
+ ssize_t ret = 0;
DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
- len = iov_length(iv, count);
- if (len < 0) {
- ret = -EINVAL;
- goto out;
- }
-
add_wait_queue(&tun->socket.wait, &wait);
while (len) {
current->state = TASK_INTERRUPTIBLE;
/* Read frames from the queue */
if (!(skb=skb_dequeue(&tun->socket.sk->sk_receive_queue))) {
- if (file->f_flags & O_NONBLOCK) {
+ if (noblock) {
ret = -EAGAIN;
break;
}
@@ -805,6 +797,27 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
current->state = TASK_RUNNING;
remove_wait_queue(&tun->socket.wait, &wait);
+ return ret;
+}
+
+static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
+ unsigned long count, loff_t pos)
+{
+ struct file *file = iocb->ki_filp;
+ struct tun_file *tfile = file->private_data;
+ struct tun_struct *tun = __tun_get(tfile);
+ ssize_t len, ret;
+
+ if (!tun)
+ return -EBADFD;
+ len = iov_length(iv, count);
+ if (len < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
+ ret = min_t(ssize_t, ret, len);
out:
tun_put(tun);
return ret;
@@ -847,17 +860,49 @@ static void tun_sock_write_space(struct sock *sk)
return;
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_sync(sk->sk_sleep);
+ wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+ POLLWRNORM | POLLWRBAND);
- tun = container_of(sk, struct tun_sock, sk)->tun;
+ tun = tun_sk(sk)->tun;
kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
}
static void tun_sock_destruct(struct sock *sk)
{
- free_netdev(container_of(sk, struct tun_sock, sk)->tun->dev);
+ free_netdev(tun_sk(sk)->tun->dev);
}
+static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
+{
+ struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
+ return tun_get_user(tun, m->msg_iov, total_len,
+ m->msg_flags & MSG_DONTWAIT);
+}
+
+static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len,
+ int flags)
+{
+ struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
+ int ret;
+ if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
+ return -EINVAL;
+ ret = tun_do_read(tun, iocb, m->msg_iov, total_len,
+ flags & MSG_DONTWAIT);
+ if (ret > total_len) {
+ m->msg_flags |= MSG_TRUNC;
+ ret = flags & MSG_TRUNC ? ret : total_len;
+ }
+ return ret;
+}
+
+/* Ops structure to mimic raw sockets with tun */
+static const struct proto_ops tun_socket_ops = {
+ .sendmsg = tun_sendmsg,
+ .recvmsg = tun_recvmsg,
+};
+
static struct proto tun_proto = {
.name = "tun",
.owner = THIS_MODULE,
@@ -986,11 +1031,12 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
goto err_free_dev;
init_waitqueue_head(&tun->socket.wait);
+ tun->socket.ops = &tun_socket_ops;
sock_init_data(&tun->socket, sk);
sk->sk_write_space = tun_sock_write_space;
sk->sk_sndbuf = INT_MAX;
- container_of(sk, struct tun_sock, sk)->tun = tun;
+ tun_sk(sk)->tun = tun;
security_tun_dev_post_create(sk);
@@ -1525,6 +1571,23 @@ static void tun_cleanup(void)
rtnl_link_unregister(&tun_link_ops);
}
+/* Get an underlying socket object from tun file. Returns error unless file is
+ * attached to a device. The returned object works like a packet socket, it
+ * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
+ * holding a reference to the file for as long as the socket is in use. */
+struct socket *tun_get_socket(struct file *file)
+{
+ struct tun_struct *tun;
+ if (file->f_op != &tun_fops)
+ return ERR_PTR(-EINVAL);
+ tun = tun_get(file);
+ if (!tun)
+ return ERR_PTR(-EBADFD);
+ tun_put(tun);
+ return &tun->socket;
+}
+EXPORT_SYMBOL_GPL(tun_get_socket);
+
module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
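
The tun_get_socket() helper exported above lets other kernel code drive a tun queue through the ordinary socket interface (sock_sendmsg()/sock_recvmsg()), with tun_sendmsg()/tun_recvmsg() providing the proto_ops behind it. A minimal sketch of a hypothetical in-kernel caller; the function name and surrounding error handling are illustrative assumptions, not part of this patch:

#include <linux/err.h>
#include <linux/net.h>
#include <linux/if_tun.h>	/* declaration of tun_get_socket() */

static int example_xmit_via_tun(struct file *tun_file, struct msghdr *msg,
				size_t len)
{
	struct socket *sock = tun_get_socket(tun_file);

	if (IS_ERR(sock))
		return PTR_ERR(sock);	/* not a tun file, or not attached */

	/*
	 * The socket behaves like a packet socket; the caller must keep
	 * its reference on tun_file for as long as the socket is in use.
	 */
	return sock_sendmsg(sock, msg, len);
}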
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 39f1fc650be6..6e4f754c4baf 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -215,7 +215,7 @@ static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
* bit 8 indicates if this is a (0) copper or (1) fiber card
* bits 12-16 indicate card type: (0) client and (1) server
*/
-static struct pci_device_id typhoon_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index afaf088b72ea..225f65812f2e 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -37,6 +37,7 @@
#include <asm/qe.h>
#include <asm/ucc.h>
#include <asm/ucc_fast.h>
+#include <asm/machdep.h>
#include "ucc_geth.h"
#include "fsl_pq_mdio.h"
@@ -1334,7 +1335,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
struct ucc_geth __iomem *ug_regs;
struct ucc_fast __iomem *uf_regs;
int ret_val;
- u32 upsmr, maccfg2, tbiBaseAddress;
+ u32 upsmr, maccfg2;
u16 value;
ugeth_vdbg("%s: IN", __func__);
@@ -1389,14 +1390,20 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
/* Note that this depends on proper setting in utbipar register. */
if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- tbiBaseAddress = in_be32(&ug_regs->utbipar);
- tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
- tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
- value = ugeth->phydev->bus->read(ugeth->phydev->bus,
- (u8) tbiBaseAddress, ENET_TBI_MII_CR);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ struct phy_device *tbiphy;
+
+ if (!ug_info->tbi_node)
+ ugeth_warn("TBI mode requires that the device "
+ "tree specify a tbi-handle\n");
+
+ tbiphy = of_phy_find_device(ug_info->tbi_node);
+ if (!tbiphy)
+ ugeth_warn("Could not get TBI device\n");
+
+ value = phy_read(tbiphy, ENET_TBI_MII_CR);
value &= ~0x1000; /* Turn off autonegotiation */
- ugeth->phydev->bus->write(ugeth->phydev->bus,
- (u8) tbiBaseAddress, ENET_TBI_MII_CR, value);
+ phy_write(tbiphy, ENET_TBI_MII_CR, value);
}
init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
@@ -1563,7 +1570,10 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
static void ugeth_quiesce(struct ucc_geth_private *ugeth)
{
- /* Wait for and prevent any further xmits. */
+ /* Prevent any further xmits, plus detach the device. */
+ netif_device_detach(ugeth->ndev);
+
+ /* Wait for any current xmits to finish. */
netif_tx_disable(ugeth->ndev);
/* Disable the interrupt to avoid NAPI rescheduling. */
@@ -1577,7 +1587,7 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
{
napi_enable(&ugeth->napi);
enable_irq(ugeth->ug_info->uf_info.irq);
- netif_tx_wake_all_queues(ugeth->ndev);
+ netif_device_attach(ugeth->ndev);
}
/* Called every time the controller might need to be made
@@ -1648,25 +1658,28 @@ static void adjust_link(struct net_device *dev)
ugeth->oldspeed = phydev->speed;
}
- /*
- * To change the MAC configuration we need to disable the
- * controller. To do so, we have to either grab ugeth->lock,
- * which is a bad idea since 'graceful stop' commands might
- * take quite a while, or we can quiesce driver's activity.
- */
- ugeth_quiesce(ugeth);
- ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
-
- out_be32(&ug_regs->maccfg2, tempval);
- out_be32(&uf_regs->upsmr, upsmr);
-
- ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
- ugeth_activate(ugeth);
-
if (!ugeth->oldlink) {
new_state = 1;
ugeth->oldlink = 1;
}
+
+ if (new_state) {
+ /*
+ * To change the MAC configuration we need to disable
+ * the controller. To do so, we have to either grab
+ * ugeth->lock, which is a bad idea since 'graceful
+ * stop' commands might take quite a while, or we can
+ * quiesce driver's activity.
+ */
+ ugeth_quiesce(ugeth);
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
+ out_be32(&ug_regs->maccfg2, tempval);
+ out_be32(&uf_regs->upsmr, upsmr);
+
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ ugeth_activate(ugeth);
+ }
} else if (ugeth->oldlink) {
new_state = 1;
ugeth->oldlink = 0;
@@ -3273,13 +3286,12 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
/* Handle the transmitted buffer and release */
/* the BD to be used with the current frame */
- if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
+ skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
+ if (!skb)
break;
dev->stats.tx_packets++;
- skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
-
if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
skb_recycle_check(skb,
ugeth->ug_info->uf_info.max_rx_buf_length +
@@ -3601,6 +3613,7 @@ static int ucc_geth_suspend(struct of_device *ofdev, pm_message_t state)
if (!netif_running(ndev))
return 0;
+ netif_device_detach(ndev);
napi_disable(&ugeth->napi);
/*
@@ -3659,7 +3672,7 @@ static int ucc_geth_resume(struct of_device *ofdev)
phy_start(ugeth->phydev);
napi_enable(&ugeth->napi);
- netif_start_queue(ndev);
+ netif_device_attach(ndev);
return 0;
}
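
The ugeth_quiesce()/ugeth_activate() change above switches from merely stopping the tx queue to fully detaching the net device while the MAC is reconfigured. A sketch of the generic shape of that pattern; only the netif_*/napi_*/irq helpers are real APIs, the wrapper and its arguments are illustrative:

#include <linux/netdevice.h>
#include <linux/interrupt.h>

static void example_reconfigure_mac(struct net_device *ndev,
				    struct napi_struct *napi,
				    unsigned int irq)
{
	netif_device_detach(ndev);	/* refuse new transmits, mark detached */
	netif_tx_disable(ndev);		/* wait for in-flight transmits */
	disable_irq(irq);
	napi_disable(napi);

	/* ... rewrite MAC registers while the hardware is idle ... */

	napi_enable(napi);
	enable_irq(irq);
	netif_device_attach(ndev);	/* re-enables the tx queue(s) */
}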
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index a007e2acf651..ef1fbeb11c6e 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -838,13 +838,13 @@ struct ucc_geth_hardware_statistics {
using the maximum is
easier */
#define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32
-#define UCC_GETH_SCHEDULER_ALIGNMENT 4 /* This is a guess */
+#define UCC_GETH_SCHEDULER_ALIGNMENT 8 /* This is a guess */
#define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */
#define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */
#define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 64
#define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */
#define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */
-#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 4 /* This
+#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 8 /* This
is a
guess
*/
@@ -899,16 +899,17 @@ struct ucc_geth_hardware_statistics {
#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size
*/
#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
-#define UCC_GETH_UTFTT_INIT 128
+#define UCC_GETH_UTFTT_INIT 512
/* Gigabit Ethernet (1000 Mbps) */
#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual
FIFO size */
#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */
#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */
-#define UCC_GETH_UTFS_GIGA_INIT 8192/*2048*/ /* Tx virtual
+#define UCC_GETH_UTFS_GIGA_INIT 4096/*2048*/ /* Tx virtual
+ FIFO size */
+#define UCC_GETH_UTFET_GIGA_INIT 2048/*1024*/ /* 1/2 utfs */
+#define UCC_GETH_UTFTT_GIGA_INIT 4096/*0x40*/ /* Tx virtual
FIFO size */
-#define UCC_GETH_UTFET_GIGA_INIT 4096/*1024*/ /* 1/2 utfs */
-#define UCC_GETH_UTFTT_GIGA_INIT 0x400/*0x40*/ /* */
#define UCC_GETH_REMODER_INIT 0 /* bits that must be
set */
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 21e183a83b99..4f27f022fbf7 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -419,7 +419,7 @@ static int cdc_manage_power(struct usbnet *dev, int on)
static const struct driver_info cdc_info = {
.description = "CDC Ethernet Device",
- .flags = FLAG_ETHER | FLAG_LINK_INTR,
+ .flags = FLAG_ETHER,
// .check_connect = cdc_check_connect,
.bind = cdc_bind,
.unbind = usbnet_cdc_unbind,
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 3d406f9b2f29..c820fec62041 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -381,13 +381,13 @@ static void dm9601_set_multicast(struct net_device *net)
if (net->flags & IFF_PROMISC) {
rx_ctl |= 0x02;
- } else if (net->flags & IFF_ALLMULTI || net->mc_count > DM_MAX_MCAST) {
+ } else if (net->flags & IFF_ALLMULTI ||
+ netdev_mc_count(net) > DM_MAX_MCAST) {
rx_ctl |= 0x04;
- } else if (net->mc_count) {
- struct dev_mc_list *mc_list = net->mc_list;
- int i;
+ } else if (!netdev_mc_empty(net)) {
+ struct dev_mc_list *mc_list;
- for (i = 0; i < net->mc_count; i++, mc_list = mc_list->next) {
+ netdev_for_each_mc_addr(mc_list, net) {
u32 crc = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
hashes[crc >> 3] |= 1 << (crc & 0x7);
}
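
The dm9601_set_multicast() rework above builds the 64-bit hash filter with the new netdev_for_each_mc_addr() iterator. A compact sketch of just the hash construction, assuming the dev_mc_list representation used in this diff (the helper name is illustrative):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/string.h>

static void example_build_mc_hash(struct net_device *net, u8 hashes[8])
{
	struct dev_mc_list *mc_list;

	memset(hashes, 0, 8);
	netdev_for_each_mc_addr(mc_list, net) {
		/* the top 6 bits of the CRC pick one of the 64 hash bits */
		u32 crc = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;

		hashes[crc >> 3] |= 1 << (crc & 0x7);
	}
}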
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index f78f0903b073..6895f1531238 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -286,6 +286,7 @@ struct hso_device {
u8 usb_gone;
struct work_struct async_get_intf;
struct work_struct async_put_intf;
+ struct work_struct reset_device;
struct usb_device *usb;
struct usb_interface *interface;
@@ -332,7 +333,8 @@ static void hso_kick_transmit(struct hso_serial *serial);
/* Helper functions */
static int hso_mux_submit_intr_urb(struct hso_shared_int *mux_int,
struct usb_device *usb, gfp_t gfp);
-static void log_usb_status(int status, const char *function);
+static void handle_usb_error(int status, const char *function,
+ struct hso_device *hso_dev);
static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf,
int type, int dir);
static int hso_get_mux_ports(struct usb_interface *intf, unsigned char *ports);
@@ -350,6 +352,7 @@ static void async_put_intf(struct work_struct *data);
static int hso_put_activity(struct hso_device *hso_dev);
static int hso_get_activity(struct hso_device *hso_dev);
static void tiocmget_intr_callback(struct urb *urb);
+static void reset_device(struct work_struct *data);
/*****************************************************************************/
/* Helping functions */
/*****************************************************************************/
@@ -461,10 +464,17 @@ static const struct usb_device_id hso_ids[] = {
{USB_DEVICE(0x0af0, 0x7501)}, /* GTM 382 */
{USB_DEVICE(0x0af0, 0x7601)}, /* GE40x */
{USB_DEVICE(0x0af0, 0x7701)},
+ {USB_DEVICE(0x0af0, 0x7706)},
{USB_DEVICE(0x0af0, 0x7801)},
{USB_DEVICE(0x0af0, 0x7901)},
+ {USB_DEVICE(0x0af0, 0x7A01)},
+ {USB_DEVICE(0x0af0, 0x7A05)},
{USB_DEVICE(0x0af0, 0x8200)},
{USB_DEVICE(0x0af0, 0x8201)},
+ {USB_DEVICE(0x0af0, 0x8300)},
+ {USB_DEVICE(0x0af0, 0x8302)},
+ {USB_DEVICE(0x0af0, 0x8304)},
+ {USB_DEVICE(0x0af0, 0x8400)},
{USB_DEVICE(0x0af0, 0xd035)},
{USB_DEVICE(0x0af0, 0xd055)},
{USB_DEVICE(0x0af0, 0xd155)},
@@ -473,6 +483,8 @@ static const struct usb_device_id hso_ids[] = {
{USB_DEVICE(0x0af0, 0xd157)},
{USB_DEVICE(0x0af0, 0xd257)},
{USB_DEVICE(0x0af0, 0xd357)},
+ {USB_DEVICE(0x0af0, 0xd058)},
+ {USB_DEVICE(0x0af0, 0xc100)},
{}
};
MODULE_DEVICE_TABLE(usb, hso_ids);
@@ -655,8 +667,8 @@ static void set_serial_by_index(unsigned index, struct hso_serial *serial)
spin_unlock_irqrestore(&serial_table_lock, flags);
}
-/* log a meaningful explanation of an USB status */
-static void log_usb_status(int status, const char *function)
+static void handle_usb_error(int status, const char *function,
+ struct hso_device *hso_dev)
{
char *explanation;
@@ -685,10 +697,20 @@ static void log_usb_status(int status, const char *function)
case -EMSGSIZE:
explanation = "internal error";
break;
+ case -EILSEQ:
+ case -EPROTO:
+ case -ETIME:
+ case -ETIMEDOUT:
+ explanation = "protocol error";
+ if (hso_dev)
+ schedule_work(&hso_dev->reset_device);
+ break;
default:
explanation = "unknown status";
break;
}
+
+ /* log a meaningful explanation of an USB status */
D1("%s: received USB status - %s (%d)", function, explanation, status);
}
@@ -762,7 +784,7 @@ static void write_bulk_callback(struct urb *urb)
/* log status, but don't act on it, we don't need to resubmit anything
* anyhow */
if (status)
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, odev->parent);
hso_put_activity(odev->parent);
@@ -806,7 +828,7 @@ static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb,
result = usb_submit_urb(odev->mux_bulk_tx_urb, GFP_ATOMIC);
if (result) {
dev_warn(&odev->parent->interface->dev,
- "failed mux_bulk_tx_urb %d", result);
+ "failed mux_bulk_tx_urb %d\n", result);
net->stats.tx_errors++;
netif_start_queue(net);
} else {
@@ -998,7 +1020,7 @@ static void read_bulk_callback(struct urb *urb)
/* is al ok? (Filip: Who's Al ?) */
if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, odev->parent);
return;
}
@@ -1019,7 +1041,8 @@ static void read_bulk_callback(struct urb *urb)
if (odev->parent->port_spec & HSO_INFO_CRC_BUG) {
u32 rest;
u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
- rest = urb->actual_length % odev->in_endp->wMaxPacketSize;
+ rest = urb->actual_length %
+ le16_to_cpu(odev->in_endp->wMaxPacketSize);
if (((rest == 5) || (rest == 6)) &&
!memcmp(((u8 *) urb->transfer_buffer) +
urb->actual_length - 4, crc_check, 4)) {
@@ -1053,7 +1076,7 @@ static void read_bulk_callback(struct urb *urb)
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_warn(&odev->parent->interface->dev,
- "%s failed submit mux_bulk_rx_urb %d", __func__,
+ "%s failed submit mux_bulk_rx_urb %d\n", __func__,
result);
}
@@ -1207,7 +1230,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
D1("serial == NULL");
return;
} else if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, serial->parent);
return;
}
@@ -1225,7 +1248,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
rest =
urb->actual_length %
- serial->in_endp->wMaxPacketSize;
+ le16_to_cpu(serial->in_endp->wMaxPacketSize);
if (((rest == 5) || (rest == 6)) &&
!memcmp(((u8 *) urb->transfer_buffer) +
urb->actual_length - 4, crc_check, 4)) {
@@ -1513,7 +1536,7 @@ static void tiocmget_intr_callback(struct urb *urb)
if (!serial)
return;
if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, serial->parent);
return;
}
tiocmget = serial->tiocmget;
@@ -1700,6 +1723,10 @@ static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file,
D1("no tty structures");
return -EINVAL;
}
+
+ if ((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM)
+ return -EINVAL;
+
if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
spin_lock_irqsave(&serial->serial_lock, flags);
@@ -1838,7 +1865,7 @@ static int mux_device_request(struct hso_serial *serial, u8 type, u16 port,
result = usb_submit_urb(ctrl_urb, GFP_ATOMIC);
if (result) {
dev_err(&ctrl_urb->dev->dev,
- "%s failed submit ctrl_urb %d type %d", __func__,
+ "%s failed submit ctrl_urb %d type %d\n", __func__,
result, type);
return result;
}
@@ -1888,7 +1915,7 @@ static void intr_callback(struct urb *urb)
/* status check */
if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, NULL);
return;
}
D4("\n--- Got intr callback 0x%02X ---", status);
@@ -1905,18 +1932,18 @@ static void intr_callback(struct urb *urb)
if (serial != NULL) {
D1("Pending read interrupt on port %d\n", i);
spin_lock(&serial->serial_lock);
- if (serial->rx_state == RX_IDLE) {
+ if (serial->rx_state == RX_IDLE &&
+ serial->open_count > 0) {
/* Setup and send a ctrl req read on
* port i */
- if (!serial->rx_urb_filled[0]) {
+ if (!serial->rx_urb_filled[0]) {
serial->rx_state = RX_SENT;
hso_mux_serial_read(serial);
} else
serial->rx_state = RX_PENDING;
-
} else {
- D1("Already pending a read on "
- "port %d\n", i);
+ D1("Already a read pending on "
+ "port %d or port not open\n", i);
}
spin_unlock(&serial->serial_lock);
}
@@ -1958,7 +1985,7 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb)
tty = tty_kref_get(serial->tty);
spin_unlock(&serial->serial_lock);
if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, serial->parent);
tty_kref_put(tty);
return;
}
@@ -2014,7 +2041,7 @@ static void ctrl_callback(struct urb *urb)
tty = tty_kref_get(serial->tty);
spin_unlock(&serial->serial_lock);
if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, serial->parent);
tty_kref_put(tty);
return;
}
@@ -2358,12 +2385,12 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
serial->tx_data_length = tx_size;
serial->tx_data = kzalloc(serial->tx_data_length, GFP_KERNEL);
if (!serial->tx_data) {
- dev_err(dev, "%s - Out of memory", __func__);
+ dev_err(dev, "%s - Out of memory\n", __func__);
goto exit;
}
serial->tx_buffer = kzalloc(serial->tx_data_length, GFP_KERNEL);
if (!serial->tx_buffer) {
- dev_err(dev, "%s - Out of memory", __func__);
+ dev_err(dev, "%s - Out of memory\n", __func__);
goto exit;
}
@@ -2391,6 +2418,7 @@ static struct hso_device *hso_create_device(struct usb_interface *intf,
INIT_WORK(&hso_dev->async_get_intf, async_get_intf);
INIT_WORK(&hso_dev->async_put_intf, async_put_intf);
+ INIT_WORK(&hso_dev->reset_device, reset_device);
return hso_dev;
}
@@ -2831,13 +2859,14 @@ struct hso_shared_int *hso_create_shared_int(struct usb_interface *interface)
mux->shared_intr_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!mux->shared_intr_urb) {
- dev_err(&interface->dev, "Could not allocate intr urb?");
+ dev_err(&interface->dev, "Could not allocate intr urb?\n");
goto exit;
}
- mux->shared_intr_buf = kzalloc(mux->intr_endp->wMaxPacketSize,
- GFP_KERNEL);
+ mux->shared_intr_buf =
+ kzalloc(le16_to_cpu(mux->intr_endp->wMaxPacketSize),
+ GFP_KERNEL);
if (!mux->shared_intr_buf) {
- dev_err(&interface->dev, "Could not allocate intr buf?");
+ dev_err(&interface->dev, "Could not allocate intr buf?\n");
goto exit;
}
@@ -3132,6 +3161,26 @@ out:
return result;
}
+static void reset_device(struct work_struct *data)
+{
+ struct hso_device *hso_dev =
+ container_of(data, struct hso_device, reset_device);
+ struct usb_device *usb = hso_dev->usb;
+ int result;
+
+ if (hso_dev->usb_gone) {
+ D1("No reset during disconnect\n");
+ } else {
+ result = usb_lock_device_for_reset(usb, hso_dev->interface);
+ if (result < 0)
+ D1("unable to lock device for reset: %d\n", result);
+ else {
+ usb_reset_device(usb);
+ usb_unlock_device(usb);
+ }
+ }
+}
+
static void hso_serial_ref_free(struct kref *ref)
{
struct hso_device *hso_dev = container_of(ref, struct hso_device, ref);
@@ -3232,13 +3281,13 @@ static int hso_mux_submit_intr_urb(struct hso_shared_int *shared_int,
usb_rcvintpipe(usb,
shared_int->intr_endp->bEndpointAddress & 0x7F),
shared_int->shared_intr_buf,
- shared_int->intr_endp->wMaxPacketSize,
+ 1,
intr_callback, shared_int,
shared_int->intr_endp->bInterval);
result = usb_submit_urb(shared_int->shared_intr_urb, gfp);
if (result)
- dev_warn(&usb->dev, "%s failed mux_intr_urb %d", __func__,
+ dev_warn(&usb->dev, "%s failed mux_intr_urb %d\n", __func__,
result);
return result;
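
Several hso fixes above wrap wMaxPacketSize in le16_to_cpu(): USB descriptor fields are stored little-endian, so using the raw value yields a byte-swapped packet size on big-endian hosts. Minimal sketch of the correct accessor (the helper name is illustrative; the descriptor type is the standard one):

#include <linux/usb.h>

static unsigned int example_ep_max_packet(const struct usb_endpoint_descriptor *ep)
{
	return le16_to_cpu(ep->wMaxPacketSize);
}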
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 87374317f480..6fc098fe9ff7 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -1,13 +1,27 @@
/*
- * MosChips MCS7830 based USB 2.0 Ethernet Devices
+ * MOSCHIP MCS7830 based USB 2.0 Ethernet Devices
*
* based on usbnet.c, asix.c and the vendor provided mcs7830 driver
*
+ * Copyright (C) 2010 Andreas Mohr <andi@lisas.de>
* Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>
* Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
* Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
* Copyright (c) 2002-2003 TiVo Inc.
*
+ * Definitions gathered from MOSCHIP, Data Sheet_7830DA.pdf (thanks!).
+ *
+ * TODO:
+ * - support HIF_REG_CONFIG_SLEEPMODE/HIF_REG_CONFIG_TXENABLE (via autopm?)
+ * - implement ethtool_ops get_pauseparam/set_pauseparam
+ * via HIF_REG_PAUSE_THRESHOLD (>= revision C only!)
+ * - implement get_eeprom/[set_eeprom]
+ * - switch PHY on/off on ifup/ifdown (perhaps in usbnet.c, via MII)
+ * - mcs7830_get_regs() handling is weird: for rev 2 we return 32 regs,
+ * can access only ~ 24, remaining user buffer is uninitialized garbage
+ * - anything else?
+ *
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -55,7 +69,7 @@
ADVERTISE_100HALF | ADVERTISE_10FULL | \
ADVERTISE_10HALF | ADVERTISE_CSMA)
-/* HIF_REG_XX coressponding index value */
+/* HIF_REG_XX corresponding index value */
enum {
HIF_REG_MULTICAST_HASH = 0x00,
HIF_REG_PACKET_GAP1 = 0x08,
@@ -69,6 +83,7 @@ enum {
HIF_REG_PHY_CMD2_PEND_FLAG_BIT = 0x80,
HIF_REG_PHY_CMD2_READY_FLAG_BIT = 0x40,
HIF_REG_CONFIG = 0x0e,
+ /* hmm, spec sez: "R/W", "Except bit 3" (likely TXENABLE). */
HIF_REG_CONFIG_CFG = 0x80,
HIF_REG_CONFIG_SPEED100 = 0x40,
HIF_REG_CONFIG_FULLDUPLEX_ENABLE = 0x20,
@@ -76,13 +91,24 @@ enum {
HIF_REG_CONFIG_TXENABLE = 0x08,
HIF_REG_CONFIG_SLEEPMODE = 0x04,
HIF_REG_CONFIG_ALLMULTICAST = 0x02,
- HIF_REG_CONFIG_PROMISCIOUS = 0x01,
+ HIF_REG_CONFIG_PROMISCUOUS = 0x01,
HIF_REG_ETHERNET_ADDR = 0x0f,
- HIF_REG_22 = 0x15,
+ HIF_REG_FRAME_DROP_COUNTER = 0x15, /* 0..ff; reset: 0 */
HIF_REG_PAUSE_THRESHOLD = 0x16,
HIF_REG_PAUSE_THRESHOLD_DEFAULT = 0,
};
+/* Trailing status byte in Ethernet Rx frame */
+enum {
+ MCS7830_RX_SHORT_FRAME = 0x01, /* < 64 bytes */
+ MCS7830_RX_LENGTH_ERROR = 0x02, /* framelen != Ethernet length field */
+ MCS7830_RX_ALIGNMENT_ERROR = 0x04, /* non-even number of nibbles */
+ MCS7830_RX_CRC_ERROR = 0x08,
+ MCS7830_RX_LARGE_FRAME = 0x10, /* > 1518 bytes */
+ MCS7830_RX_FRAME_CORRECT = 0x20, /* frame is correct */
+ /* [7:6] reserved */
+};
+
struct mcs7830_data {
u8 multi_filter[8];
u8 config;
@@ -109,7 +135,7 @@ static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data)
return ret;
}
-static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, void *data)
+static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *data)
{
struct usb_device *xdev = dev->udev;
int ret;
@@ -183,13 +209,43 @@ out:
usb_free_urb(urb);
}
-static int mcs7830_get_address(struct usbnet *dev)
+static int mcs7830_hif_get_mac_address(struct usbnet *dev, unsigned char *addr)
+{
+ int ret = mcs7830_get_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int mcs7830_hif_set_mac_address(struct usbnet *dev, unsigned char *addr)
+{
+ int ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr);
+
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
{
int ret;
- ret = mcs7830_get_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN,
- dev->net->dev_addr);
+ struct usbnet *dev = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ ret = mcs7830_hif_set_mac_address(dev, addr->sa_data);
+
if (ret < 0)
return ret;
+
+ /* it worked --> adopt it on netdev side */
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
return 0;
}
@@ -307,7 +363,7 @@ static int mcs7830_get_rev(struct usbnet *dev)
{
u8 dummy[2];
int ret;
- ret = mcs7830_get_reg(dev, HIF_REG_22, 2, dummy);
+ ret = mcs7830_get_reg(dev, HIF_REG_FRAME_DROP_COUNTER, 2, dummy);
if (ret > 0)
return 2; /* Rev C or later */
return 1; /* earlier revision */
@@ -331,33 +387,6 @@ static void mcs7830_rev_C_fixup(struct usbnet *dev)
}
}
-static int mcs7830_init_dev(struct usbnet *dev)
-{
- int ret;
- int retry;
-
- /* Read MAC address from EEPROM */
- ret = -EINVAL;
- for (retry = 0; retry < 5 && ret; retry++)
- ret = mcs7830_get_address(dev);
- if (ret) {
- dev_warn(&dev->udev->dev, "Cannot read MAC address\n");
- goto out;
- }
-
- /* Set up PHY */
- ret = mcs7830_set_autoneg(dev, 0);
- if (ret) {
- dev_info(&dev->udev->dev, "Cannot set autoneg\n");
- goto out;
- }
-
- mcs7830_rev_C_fixup(dev);
- ret = 0;
-out:
- return ret;
-}
-
static int mcs7830_mdio_read(struct net_device *netdev, int phy_id,
int location)
{
@@ -378,11 +407,33 @@ static int mcs7830_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
}
-/* credits go to asix_set_multicast */
-static void mcs7830_set_multicast(struct net_device *net)
+static inline struct mcs7830_data *mcs7830_get_data(struct usbnet *dev)
+{
+ return (struct mcs7830_data *)&dev->data;
+}
+
+static void mcs7830_hif_update_multicast_hash(struct usbnet *dev)
+{
+ struct mcs7830_data *data = mcs7830_get_data(dev);
+ mcs7830_set_reg_async(dev, HIF_REG_MULTICAST_HASH,
+ sizeof data->multi_filter,
+ data->multi_filter);
+}
+
+static void mcs7830_hif_update_config(struct usbnet *dev)
+{
+ /* implementation specific to data->config
+ (argument needs to be heap-based anyway - USB DMA!) */
+ struct mcs7830_data *data = mcs7830_get_data(dev);
+ mcs7830_set_reg_async(dev, HIF_REG_CONFIG, 1, &data->config);
+}
+
+static void mcs7830_data_set_multicast(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
- struct mcs7830_data *data = (struct mcs7830_data *)&dev->data;
+ struct mcs7830_data *data = mcs7830_get_data(dev);
+
+ memset(data->multi_filter, 0, sizeof data->multi_filter);
data->config = HIF_REG_CONFIG_TXENABLE;
@@ -390,7 +441,7 @@ static void mcs7830_set_multicast(struct net_device *net)
data->config |= HIF_REG_CONFIG_ALLMULTICAST;
if (net->flags & IFF_PROMISC) {
- data->config |= HIF_REG_CONFIG_PROMISCIOUS;
+ data->config |= HIF_REG_CONFIG_PROMISCUOUS;
} else if (net->flags & IFF_ALLMULTI ||
net->mc_count > MCS7830_MAX_MCAST) {
data->config |= HIF_REG_CONFIG_ALLMULTICAST;
@@ -405,21 +456,51 @@ static void mcs7830_set_multicast(struct net_device *net)
u32 crc_bits;
int i;
- memset(data->multi_filter, 0, sizeof data->multi_filter);
-
/* Build the multicast hash filter. */
for (i = 0; i < net->mc_count; i++) {
crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7);
mc_list = mc_list->next;
}
+ }
+}
- mcs7830_set_reg_async(dev, HIF_REG_MULTICAST_HASH,
- sizeof data->multi_filter,
- data->multi_filter);
+static int mcs7830_apply_base_config(struct usbnet *dev)
+{
+ int ret;
+
+ /* re-configure known MAC (suspend case etc.) */
+ ret = mcs7830_hif_set_mac_address(dev, dev->net->dev_addr);
+ if (ret) {
+ dev_info(&dev->udev->dev, "Cannot set MAC address\n");
+ goto out;
}
- mcs7830_set_reg_async(dev, HIF_REG_CONFIG, 1, &data->config);
+ /* Set up PHY */
+ ret = mcs7830_set_autoneg(dev, 0);
+ if (ret) {
+ dev_info(&dev->udev->dev, "Cannot set autoneg\n");
+ goto out;
+ }
+
+ mcs7830_hif_update_multicast_hash(dev);
+ mcs7830_hif_update_config(dev);
+
+ mcs7830_rev_C_fixup(dev);
+ ret = 0;
+out:
+ return ret;
+}
+
+/* credits go to asix_set_multicast */
+static void mcs7830_set_multicast(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+
+ mcs7830_data_set_multicast(net);
+
+ mcs7830_hif_update_multicast_hash(dev);
+ mcs7830_hif_update_config(dev);
}
static int mcs7830_get_regs_len(struct net_device *net)
@@ -463,29 +544,6 @@ static const struct ethtool_ops mcs7830_ethtool_ops = {
.nway_reset = usbnet_nway_reset,
};
-static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
-{
- int ret;
- struct usbnet *dev = netdev_priv(netdev);
- struct sockaddr *addr = p;
-
- if (netif_running(netdev))
- return -EBUSY;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
-
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-
- ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN,
- netdev->dev_addr);
-
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
static const struct net_device_ops mcs7830_netdev_ops = {
.ndo_open = usbnet_open,
.ndo_stop = usbnet_stop,
@@ -495,21 +553,32 @@ static const struct net_device_ops mcs7830_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mcs7830_ioctl,
.ndo_set_multicast_list = mcs7830_set_multicast,
- .ndo_set_mac_address = mcs7830_set_mac_address,
+ .ndo_set_mac_address = mcs7830_set_mac_address,
};
static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev)
{
struct net_device *net = dev->net;
int ret;
+ int retry;
- ret = mcs7830_init_dev(dev);
+ /* Initial startup: Gather MAC address setting from EEPROM */
+ ret = -EINVAL;
+ for (retry = 0; retry < 5 && ret; retry++)
+ ret = mcs7830_hif_get_mac_address(dev, net->dev_addr);
+ if (ret) {
+ dev_warn(&dev->udev->dev, "Cannot read MAC address\n");
+ goto out;
+ }
+
+ mcs7830_data_set_multicast(net);
+
+ ret = mcs7830_apply_base_config(dev);
if (ret)
goto out;
net->ethtool_ops = &mcs7830_ethtool_ops;
net->netdev_ops = &mcs7830_netdev_ops;
- mcs7830_set_multicast(net);
/* reserve space for the status byte on rx */
dev->rx_urb_size = ETH_FRAME_LEN + 1;
@@ -526,7 +595,7 @@ out:
return ret;
}
-/* The chip always appends a status bytes that we need to strip */
+/* The chip always appends a status byte that we need to strip */
static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
u8 status;
@@ -539,9 +608,23 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb_trim(skb, skb->len - 1);
status = skb->data[skb->len];
- if (status != 0x20)
+ if (status != MCS7830_RX_FRAME_CORRECT) {
dev_dbg(&dev->udev->dev, "rx fixup status %x\n", status);
+ /* hmm, perhaps usbnet.c already sees a globally visible
+ frame error and increments rx_errors on its own? */
+ dev->net->stats.rx_errors++;
+
+ if (status & (MCS7830_RX_SHORT_FRAME
+ |MCS7830_RX_LENGTH_ERROR
+ |MCS7830_RX_LARGE_FRAME))
+ dev->net->stats.rx_length_errors++;
+ if (status & MCS7830_RX_ALIGNMENT_ERROR)
+ dev->net->stats.rx_frame_errors++;
+ if (status & MCS7830_RX_CRC_ERROR)
+ dev->net->stats.rx_crc_errors++;
+ }
+
return skb->len > 0;
}
@@ -580,6 +663,20 @@ static const struct usb_device_id products[] = {
};
MODULE_DEVICE_TABLE(usb, products);
+static int mcs7830_reset_resume (struct usb_interface *intf)
+{
+ /* YES, this function is successful enough that ethtool -d
+ shows the same output pre-/post-suspend */
+
+ struct usbnet *dev = usb_get_intfdata(intf);
+
+ mcs7830_apply_base_config(dev);
+
+ usbnet_resume(intf);
+
+ return 0;
+}
+
static struct usb_driver mcs7830_driver = {
.name = driver_name,
.id_table = products,
@@ -587,6 +684,7 @@ static struct usb_driver mcs7830_driver = {
.disconnect = usbnet_disconnect,
.suspend = usbnet_suspend,
.resume = usbnet_resume,
+ .reset_resume = mcs7830_reset_resume,
};
static int __init mcs7830_init(void)
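
mcs7830_get_data() above reuses the scratch area that struct usbnet already provides (dev->data) for driver-private state instead of a separate allocation, so the state must fit in that area. A compile-time guard one might add for that assumption (illustrative, not part of the patch; it would have to live in the same file, where struct mcs7830_data is defined):

#include <linux/kernel.h>
#include <linux/usb/usbnet.h>

static inline void example_check_priv_fits(void)
{
	BUILD_BUG_ON(sizeof(struct mcs7830_data) >
		     sizeof(((struct usbnet *)0)->data));
}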
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index dfc7e9fec3ea..21ac103fbb71 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -270,7 +270,7 @@ static int read_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 * reg)
get_registers(dev, PHYCNT, 1, data);
} while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT));
- if (i < MII_TIMEOUT) {
+ if (i <= MII_TIMEOUT) {
get_registers(dev, PHYDAT, 2, data);
*reg = data[0] | (data[1] << 8);
return 0;
@@ -295,7 +295,7 @@ static int write_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 reg)
get_registers(dev, PHYCNT, 1, data);
} while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT));
- if (i < MII_TIMEOUT)
+ if (i <= MII_TIMEOUT)
return 0;
else
return 1;
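
The MII_TIMEOUT comparisons above change from "<" to "<=" because of how the polling loop counts: the "&&" short-circuit skips the final i++ once PHY_GO clears, so a command that completes on the very last attempt exits with i == MII_TIMEOUT, while a genuine timeout exits with i == MII_TIMEOUT + 1. A sketch of the pattern; PHYCNT, PHY_GO, MII_TIMEOUT and get_registers() are the driver's own, the wrapper is illustrative:

static int example_wait_phy_done(rtl8150_t *dev)
{
	u8 data[1];
	int i = 0;

	do {
		get_registers(dev, PHYCNT, 1, data);
	} while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT));

	return (i <= MII_TIMEOUT) ? 0 : -ETIMEDOUT;
}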
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 593e01f64e9b..a7e0c84426ea 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -102,6 +102,7 @@ static const int multicast_filter_limit = 32;
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
+#include <linux/workqueue.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
@@ -266,7 +267,7 @@ enum rhine_quirks {
/* Beware of PCI posted writes */
#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
-static const struct pci_device_id rhine_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
@@ -389,6 +390,7 @@ struct rhine_private {
struct net_device *dev;
struct napi_struct napi;
spinlock_t lock;
+ struct work_struct reset_task;
/* Frequently used values: keep some adjacent for cache effect. */
u32 quirks;
@@ -407,6 +409,7 @@ struct rhine_private {
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
+static void rhine_reset_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev);
@@ -775,6 +778,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
dev->irq = pdev->irq;
spin_lock_init(&rp->lock);
+ INIT_WORK(&rp->reset_task, rhine_reset_task);
+
rp->mii_if.dev = dev;
rp->mii_if.mdio_read = mdio_read;
rp->mii_if.mdio_write = mdio_write;
@@ -1179,22 +1184,18 @@ static int rhine_open(struct net_device *dev)
return 0;
}
-static void rhine_tx_timeout(struct net_device *dev)
+static void rhine_reset_task(struct work_struct *work)
{
- struct rhine_private *rp = netdev_priv(dev);
- void __iomem *ioaddr = rp->base;
-
- printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
- "%4.4x, resetting...\n",
- dev->name, ioread16(ioaddr + IntrStatus),
- mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+ struct rhine_private *rp = container_of(work, struct rhine_private,
+ reset_task);
+ struct net_device *dev = rp->dev;
/* protect against concurrent rx interrupts */
disable_irq(rp->pdev->irq);
napi_disable(&rp->napi);
- spin_lock(&rp->lock);
+ spin_lock_bh(&rp->lock);
/* clear all descriptors */
free_tbufs(dev);
@@ -1206,7 +1207,7 @@ static void rhine_tx_timeout(struct net_device *dev)
rhine_chip_reset(dev);
init_registers(dev);
- spin_unlock(&rp->lock);
+ spin_unlock_bh(&rp->lock);
enable_irq(rp->pdev->irq);
dev->trans_start = jiffies;
@@ -1214,6 +1215,19 @@ static void rhine_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
+static void rhine_tx_timeout(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
+ "%4.4x, resetting...\n",
+ dev->name, ioread16(ioaddr + IntrStatus),
+ mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+
+ schedule_work(&rp->reset_task);
+}
+
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev)
{
@@ -1830,10 +1844,11 @@ static int rhine_close(struct net_device *dev)
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
- spin_lock_irq(&rp->lock);
-
- netif_stop_queue(dev);
napi_disable(&rp->napi);
+ cancel_work_sync(&rp->reset_task);
+ netif_stop_queue(dev);
+
+ spin_lock_irq(&rp->lock);
if (debug > 1)
printk(KERN_DEBUG "%s: Shutting down ethercard, "
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index cee8fa2d2a9f..f15485efe40e 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -361,7 +361,7 @@ static struct velocity_info_tbl chip_info_table[] = {
* Describe the PCI device identifiers that we support in this
* device driver. Used for hotplug autoloading.
*/
-static const struct pci_device_id velocity_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
{ }
};
@@ -2237,8 +2237,6 @@ static int velocity_open(struct net_device *dev)
/* Ensure chip is running */
pci_set_power_state(vptr->pdev, PCI_D0);
- velocity_give_many_rx_descs(vptr);
-
velocity_init_registers(vptr, VELOCITY_INIT_COLD);
ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
@@ -2250,6 +2248,8 @@ static int velocity_open(struct net_device *dev)
goto out;
}
+ velocity_give_many_rx_descs(vptr);
+
mac_enable_int(vptr->mac_regs);
netif_start_queue(dev);
napi_enable(&vptr->napi);
@@ -2339,10 +2339,10 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
dev->mtu = new_mtu;
- velocity_give_many_rx_descs(vptr);
-
velocity_init_registers(vptr, VELOCITY_INIT_COLD);
+ velocity_give_many_rx_descs(vptr);
+
mac_enable_int(vptr->mac_regs);
netif_start_queue(dev);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c708ecc3cb2e..9d8984a3741c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -56,8 +56,7 @@ struct virtnet_info
/* Host will merge rx buffers for big packets (shake it! shake it!) */
bool mergeable_rx_bufs;
- /* Receive & send queues. */
- struct sk_buff_head recv;
+ /* Send queue. */
struct sk_buff_head send;
/* Work struct for refilling if we run low on memory. */
@@ -75,34 +74,44 @@ struct skb_vnet_hdr {
unsigned int num_sg;
};
+struct padded_vnet_hdr {
+ struct virtio_net_hdr hdr;
+ /*
+ * virtio_net_hdr should be in a separate sg buffer because of a QEMU
+ * bug, and the data sg buffer shares the same page with this header sg.
+ * The padding makes the next sg 16-byte aligned after virtio_net_hdr.
+ */
+ char padding[6];
+};
+
static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
return (struct skb_vnet_hdr *)skb->cb;
}
-static void give_a_page(struct virtnet_info *vi, struct page *page)
-{
- page->private = (unsigned long)vi->pages;
- vi->pages = page;
-}
-
-static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
+/*
+ * page->private is used to chain pages for big packets; the most
+ * recently used list is put at the front so it is reused first
+ */
+static void give_pages(struct virtnet_info *vi, struct page *page)
{
- unsigned int i;
+ struct page *end;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- give_a_page(vi, skb_shinfo(skb)->frags[i].page);
- skb_shinfo(skb)->nr_frags = 0;
- skb->data_len = 0;
+ /* Find end of list, sew whole thing into vi->pages. */
+ for (end = page; end->private; end = (struct page *)end->private);
+ end->private = (unsigned long)vi->pages;
+ vi->pages = page;
}
static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
struct page *p = vi->pages;
- if (p)
+ if (p) {
vi->pages = (struct page *)p->private;
- else
+ /* clear private here, it is used to chain pages */
+ p->private = 0;
+ } else
p = alloc_page(gfp_mask);
return p;
}
@@ -118,99 +127,142 @@ static void skb_xmit_done(struct virtqueue *svq)
netif_wake_queue(vi->dev);
}
-static void receive_skb(struct net_device *dev, struct sk_buff *skb,
- unsigned len)
+static void set_skb_frag(struct sk_buff *skb, struct page *page,
+ unsigned int offset, unsigned int *len)
{
- struct virtnet_info *vi = netdev_priv(dev);
- struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
- int err;
- int i;
-
- if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
- pr_debug("%s: short packet %i\n", dev->name, len);
- dev->stats.rx_length_errors++;
- goto drop;
- }
+ int i = skb_shinfo(skb)->nr_frags;
+ skb_frag_t *f;
+
+ f = &skb_shinfo(skb)->frags[i];
+ f->size = min((unsigned)PAGE_SIZE - offset, *len);
+ f->page_offset = offset;
+ f->page = page;
+
+ skb->data_len += f->size;
+ skb->len += f->size;
+ skb_shinfo(skb)->nr_frags++;
+ *len -= f->size;
+}
- if (vi->mergeable_rx_bufs) {
- unsigned int copy;
- char *p = page_address(skb_shinfo(skb)->frags[0].page);
+static struct sk_buff *page_to_skb(struct virtnet_info *vi,
+ struct page *page, unsigned int len)
+{
+ struct sk_buff *skb;
+ struct skb_vnet_hdr *hdr;
+ unsigned int copy, hdr_len, offset;
+ char *p;
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
- len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ p = page_address(page);
- memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
- p += sizeof(hdr->mhdr);
+ /* copy small packet so we can reuse these pages for small data */
+ skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
+ if (unlikely(!skb))
+ return NULL;
- copy = len;
- if (copy > skb_tailroom(skb))
- copy = skb_tailroom(skb);
+ hdr = skb_vnet_hdr(skb);
- memcpy(skb_put(skb, copy), p, copy);
+ if (vi->mergeable_rx_bufs) {
+ hdr_len = sizeof hdr->mhdr;
+ offset = hdr_len;
+ } else {
+ hdr_len = sizeof hdr->hdr;
+ offset = sizeof(struct padded_vnet_hdr);
+ }
- len -= copy;
+ memcpy(hdr, p, hdr_len);
- if (!len) {
- give_a_page(vi, skb_shinfo(skb)->frags[0].page);
- skb_shinfo(skb)->nr_frags--;
- } else {
- skb_shinfo(skb)->frags[0].page_offset +=
- sizeof(hdr->mhdr) + copy;
- skb_shinfo(skb)->frags[0].size = len;
- skb->data_len += len;
- skb->len += len;
- }
+ len -= hdr_len;
+ p += offset;
- while (--hdr->mhdr.num_buffers) {
- struct sk_buff *nskb;
+ copy = len;
+ if (copy > skb_tailroom(skb))
+ copy = skb_tailroom(skb);
+ memcpy(skb_put(skb, copy), p, copy);
- i = skb_shinfo(skb)->nr_frags;
- if (i >= MAX_SKB_FRAGS) {
- pr_debug("%s: packet too long %d\n", dev->name,
- len);
- dev->stats.rx_length_errors++;
- goto drop;
- }
+ len -= copy;
+ offset += copy;
- nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
- if (!nskb) {
- pr_debug("%s: rx error: %d buffers missing\n",
- dev->name, hdr->mhdr.num_buffers);
- dev->stats.rx_length_errors++;
- goto drop;
- }
+ while (len) {
+ set_skb_frag(skb, page, offset, &len);
+ page = (struct page *)page->private;
+ offset = 0;
+ }
- __skb_unlink(nskb, &vi->recv);
- vi->num--;
+ if (page)
+ give_pages(vi, page);
- skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
- skb_shinfo(nskb)->nr_frags = 0;
- kfree_skb(nskb);
+ return skb;
+}
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
+static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
+{
+ struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
+ struct page *page;
+ int num_buf, i, len;
+
+ num_buf = hdr->mhdr.num_buffers;
+ while (--num_buf) {
+ i = skb_shinfo(skb)->nr_frags;
+ if (i >= MAX_SKB_FRAGS) {
+ pr_debug("%s: packet too long\n", skb->dev->name);
+ skb->dev->stats.rx_length_errors++;
+ return -EINVAL;
+ }
- skb_shinfo(skb)->frags[i].size = len;
- skb_shinfo(skb)->nr_frags++;
- skb->data_len += len;
- skb->len += len;
+ page = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
+ if (!page) {
+ pr_debug("%s: rx error: %d buffers missing\n",
+ skb->dev->name, hdr->mhdr.num_buffers);
+ skb->dev->stats.rx_length_errors++;
+ return -EINVAL;
}
- } else {
- len -= sizeof(hdr->hdr);
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+
+ set_skb_frag(skb, page, 0, &len);
+
+ --vi->num;
+ }
+ return 0;
+}
+
+static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct page *page;
+ struct skb_vnet_hdr *hdr;
- if (len <= MAX_PACKET_LEN)
- trim_pages(vi, skb);
+ if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
+ pr_debug("%s: short packet %i\n", dev->name, len);
+ dev->stats.rx_length_errors++;
+ if (vi->mergeable_rx_bufs || vi->big_packets)
+ give_pages(vi, buf);
+ else
+ dev_kfree_skb(buf);
+ return;
+ }
- err = pskb_trim(skb, len);
- if (err) {
- pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
- len, err);
+ if (!vi->mergeable_rx_bufs && !vi->big_packets) {
+ skb = buf;
+ len -= sizeof(struct virtio_net_hdr);
+ skb_trim(skb, len);
+ } else {
+ page = buf;
+ skb = page_to_skb(vi, page, len);
+ if (unlikely(!skb)) {
dev->stats.rx_dropped++;
- goto drop;
+ give_pages(vi, page);
+ return;
}
+ if (vi->mergeable_rx_bufs)
+ if (receive_mergeable(vi, skb)) {
+ dev_kfree_skb(skb);
+ return;
+ }
}
+ hdr = skb_vnet_hdr(skb);
skb->truesize += skb->data_len;
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
@@ -267,110 +319,119 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
frame_err:
dev->stats.rx_frame_errors++;
-drop:
dev_kfree_skb(skb);
}
-static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
+static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
struct sk_buff *skb;
- struct scatterlist sg[2+MAX_SKB_FRAGS];
- int num, err, i;
- bool oom = false;
-
- sg_init_table(sg, 2+MAX_SKB_FRAGS);
- do {
- struct skb_vnet_hdr *hdr;
+ struct skb_vnet_hdr *hdr;
+ struct scatterlist sg[2];
+ int err;
- skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
- if (unlikely(!skb)) {
- oom = true;
- break;
- }
+ skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
+ if (unlikely(!skb))
+ return -ENOMEM;
- skb_put(skb, MAX_PACKET_LEN);
+ skb_put(skb, MAX_PACKET_LEN);
- hdr = skb_vnet_hdr(skb);
- sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
+ hdr = skb_vnet_hdr(skb);
+ sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
- if (vi->big_packets) {
- for (i = 0; i < MAX_SKB_FRAGS; i++) {
- skb_frag_t *f = &skb_shinfo(skb)->frags[i];
- f->page = get_a_page(vi, gfp);
- if (!f->page)
- break;
+ skb_to_sgvec(skb, sg + 1, 0, skb->len);
- f->page_offset = 0;
- f->size = PAGE_SIZE;
+ err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb);
+ if (err < 0)
+ dev_kfree_skb(skb);
- skb->data_len += PAGE_SIZE;
- skb->len += PAGE_SIZE;
+ return err;
+}
- skb_shinfo(skb)->nr_frags++;
- }
+static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
+{
+ struct scatterlist sg[MAX_SKB_FRAGS + 2];
+ struct page *first, *list = NULL;
+ char *p;
+ int i, err, offset;
+
+ /* page in sg[MAX_SKB_FRAGS + 1] is list tail */
+ for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
+ first = get_a_page(vi, gfp);
+ if (!first) {
+ if (list)
+ give_pages(vi, list);
+ return -ENOMEM;
}
+ sg_set_buf(&sg[i], page_address(first), PAGE_SIZE);
- num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
- skb_queue_head(&vi->recv, skb);
+ /* chain new page in list head to match sg */
+ first->private = (unsigned long)list;
+ list = first;
+ }
- err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
- if (err < 0) {
- skb_unlink(skb, &vi->recv);
- trim_pages(vi, skb);
- kfree_skb(skb);
- break;
- }
- vi->num++;
- } while (err >= num);
- if (unlikely(vi->num > vi->max))
- vi->max = vi->num;
- vi->rvq->vq_ops->kick(vi->rvq);
- return !oom;
+ first = get_a_page(vi, gfp);
+ if (!first) {
+ give_pages(vi, list);
+ return -ENOMEM;
+ }
+ p = page_address(first);
+
+ /* sg[0], sg[1] share the same page */
+ /* a separate sg[0] for the virtio_net_hdr only, due to a QEMU bug */
+ sg_set_buf(&sg[0], p, sizeof(struct virtio_net_hdr));
+
+ /* sg[1] for data packet, from offset */
+ offset = sizeof(struct padded_vnet_hdr);
+ sg_set_buf(&sg[1], p + offset, PAGE_SIZE - offset);
+
+ /* chain first in list head */
+ first->private = (unsigned long)list;
+ err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2,
+ first);
+ if (err < 0)
+ give_pages(vi, first);
+
+ return err;
}
-/* Returns false if we couldn't fill entirely (OOM). */
-static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
+static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
- struct sk_buff *skb;
- struct scatterlist sg[1];
+ struct page *page;
+ struct scatterlist sg;
int err;
- bool oom = false;
- if (!vi->mergeable_rx_bufs)
- return try_fill_recv_maxbufs(vi, gfp);
-
- do {
- skb_frag_t *f;
+ page = get_a_page(vi, gfp);
+ if (!page)
+ return -ENOMEM;
- skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
- if (unlikely(!skb)) {
- oom = true;
- break;
- }
+ sg_init_one(&sg, page_address(page), PAGE_SIZE);
- f = &skb_shinfo(skb)->frags[0];
- f->page = get_a_page(vi, gfp);
- if (!f->page) {
- oom = true;
- kfree_skb(skb);
- break;
- }
+ err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page);
+ if (err < 0)
+ give_pages(vi, page);
- f->page_offset = 0;
- f->size = PAGE_SIZE;
+ return err;
+}
- skb_shinfo(skb)->nr_frags++;
+/* Returns false if we couldn't fill entirely (OOM). */
+static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
+{
+ int err;
+ bool oom = false;
- sg_init_one(sg, page_address(f->page), PAGE_SIZE);
- skb_queue_head(&vi->recv, skb);
+ do {
+ if (vi->mergeable_rx_bufs)
+ err = add_recvbuf_mergeable(vi, gfp);
+ else if (vi->big_packets)
+ err = add_recvbuf_big(vi, gfp);
+ else
+ err = add_recvbuf_small(vi, gfp);
- err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
if (err < 0) {
- skb_unlink(skb, &vi->recv);
- kfree_skb(skb);
+ oom = true;
break;
}
- vi->num++;
+ ++vi->num;
} while (err > 0);
if (unlikely(vi->num > vi->max))
vi->max = vi->num;
@@ -395,8 +456,7 @@ static void refill_work(struct work_struct *work)
vi = container_of(work, struct virtnet_info, refill.work);
napi_disable(&vi->napi);
- try_fill_recv(vi, GFP_KERNEL);
- still_empty = (vi->num == 0);
+ still_empty = !try_fill_recv(vi, GFP_KERNEL);
napi_enable(&vi->napi);
/* In theory, this can happen: if we don't get any buffers in
@@ -408,15 +468,14 @@ static void refill_work(struct work_struct *work)
static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
- struct sk_buff *skb = NULL;
+ void *buf;
unsigned int len, received = 0;
again:
while (received < budget &&
- (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
- __skb_unlink(skb, &vi->recv);
- receive_skb(vi->dev, skb, len);
- vi->num--;
+ (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
+ receive_buf(vi->dev, buf, len);
+ --vi->num;
received++;
}
@@ -496,9 +555,9 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
/* Encode metadata header at front. */
if (vi->mergeable_rx_bufs)
- sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr));
+ sg_set_buf(sg, &hdr->mhdr, sizeof hdr->mhdr);
else
- sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
+ sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
@@ -675,6 +734,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
struct virtio_net_ctrl_mac *mac_data;
struct dev_addr_list *addr;
struct netdev_hw_addr *ha;
+ int uc_count;
void *buf;
int i;
@@ -701,8 +761,9 @@ static void virtnet_set_rx_mode(struct net_device *dev)
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
allmulti ? "en" : "dis");
+ uc_count = netdev_uc_count(dev);
/* MAC filter - use one buffer for both lists */
- mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) +
+ mac_data = buf = kzalloc(((uc_count + dev->mc_count) * ETH_ALEN) +
(2 * sizeof(mac_data->entries)), GFP_ATOMIC);
if (!buf) {
dev_warn(&dev->dev, "No memory for MAC address buffer\n");
@@ -712,16 +773,16 @@ static void virtnet_set_rx_mode(struct net_device *dev)
sg_init_table(sg, 2);
/* Store the unicast list and count in the front of the buffer */
- mac_data->entries = dev->uc.count;
+ mac_data->entries = uc_count;
i = 0;
- list_for_each_entry(ha, &dev->uc.list, list)
+ netdev_for_each_uc_addr(ha, dev)
memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
sg_set_buf(&sg[0], mac_data,
- sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN));
+ sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
/* multicast list and count fill the end */
- mac_data = (void *)&mac_data->macs[dev->uc.count][0];
+ mac_data = (void *)&mac_data->macs[uc_count][0];
mac_data->entries = dev->mc_count;
addr = dev->mc_list;
@@ -916,8 +977,7 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->features |= NETIF_F_HW_VLAN_FILTER;
}
- /* Initialize our empty receive and send queues. */
- skb_queue_head_init(&vi->recv);
+ /* Initialize our empty send queue. */
skb_queue_head_init(&vi->send);
err = register_netdev(dev);
@@ -952,25 +1012,35 @@ free:
return err;
}
+static void free_unused_bufs(struct virtnet_info *vi)
+{
+ void *buf;
+ while (1) {
+ buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq);
+ if (!buf)
+ break;
+ if (vi->mergeable_rx_bufs || vi->big_packets)
+ give_pages(vi, buf);
+ else
+ dev_kfree_skb(buf);
+ --vi->num;
+ }
+ BUG_ON(vi->num != 0);
+}
+
static void __devexit virtnet_remove(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
- struct sk_buff *skb;
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
- /* Free our skbs in send and recv queues, if any. */
- while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
- kfree_skb(skb);
- vi->num--;
- }
+ /* Free our skbs in send queue, if any. */
__skb_queue_purge(&vi->send);
- BUG_ON(vi->num != 0);
-
unregister_netdev(vi->dev);
cancel_delayed_work_sync(&vi->refill);
+ free_unused_bufs(vi);
vdev->config->del_vqs(vi->vdev);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 9cc438282d77..b896f9386110 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -35,7 +35,7 @@ char vmxnet3_driver_name[] = "vmxnet3";
* PCI Device ID Table
* Last entry must be all 0s
*/
-static const struct pci_device_id vmxnet3_pciid_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
{0}
};
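The DEFINE_PCI_DEVICE_TABLE() conversions that recur throughout this series replace open-coded struct pci_device_id arrays with the helper macro. As a rough sketch of what the macro expanded to in kernels of this era (exact section attributes may differ by version), the effect is to make the ID table const and place it in the devinit const section:

/* Approximate expansion, for illustration only: */
#define DEFINE_PCI_DEVICE_TABLE(_table) \
        const struct pci_device_id _table[] __devinitconst

/* so     static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = { ... };
 * reads roughly as
 *        static const struct pci_device_id vmxnet3_pciid_table[]
 *                __devinitconst = { ... };
 */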
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index f1c4b2a1e867..a6606b8948e9 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -54,7 +54,7 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O"
"Virtualized Server Adapter");
-static struct pci_device_id vxge_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
PCI_ANY_ID},
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
@@ -310,7 +310,7 @@ static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
rx_priv->data_size, PCI_DMA_FROMDEVICE);
- if (dma_addr == 0) {
+ if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
ring->stats.pci_map_fail++;
return -EIO;
}
@@ -4087,21 +4087,21 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit0;
}
- if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
vxge_debug_ll_config(VXGE_TRACE,
"%s : using 64bit DMA", __func__);
high_dma = 1;
if (pci_set_consistent_dma_mask(pdev,
- 0xffffffffffffffffULL)) {
+ DMA_BIT_MASK(64))) {
vxge_debug_init(VXGE_ERR,
"%s : unable to obtain 64bit DMA for "
"consistent allocations", __func__);
ret = -ENOMEM;
goto _exit1;
}
- } else if (!pci_set_dma_mask(pdev, 0xffffffffUL)) {
+ } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
vxge_debug_ll_config(VXGE_TRACE,
"%s : using 32bit DMA", __func__);
} else {
@@ -4297,10 +4297,8 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
vdev->ndev->name, ll_config.device_hw_info.product_desc);
- vxge_debug_init(VXGE_TRACE,
- "%s: MAC ADDR: %02X:%02X:%02X:%02X:%02X:%02X",
- vdev->ndev->name, macaddr[0], macaddr[1], macaddr[2],
- macaddr[3], macaddr[4], macaddr[5]);
+ vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
+ vdev->ndev->name, macaddr);
vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
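Two of the vxge changes above are idiom cleanups: the open-coded 64-bit and 32-bit DMA masks become DMA_BIT_MASK(), and a failed pci_map_single() is detected with pci_dma_mapping_error() instead of comparing the handle against zero, since the DMA API's error cookie is not required to be zero. A minimal, runnable sketch of the value DMA_BIT_MASK() evaluates to (this reimplements the arithmetic for illustration; it is not the kernel macro itself):

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as the kernel's DMA_BIT_MASK(n): all ones for n == 64,
 * otherwise the low n bits set. */
static uint64_t dma_bit_mask(unsigned int n)
{
        return (n == 64) ? ~0ULL : ((1ULL << n) - 1);
}

int main(void)
{
        printf("DMA_BIT_MASK(64) = 0x%016llx\n",
               (unsigned long long)dma_bit_mask(64));
        printf("DMA_BIT_MASK(32) = 0x%016llx\n",
               (unsigned long long)dma_bit_mask(32));
        return 0;
}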
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 3f759daf3ca4..f88c07c13197 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -2050,7 +2050,7 @@ static int __init dscc4_setup(char *str)
__setup("dscc4.setup=", dscc4_setup);
#endif
-static struct pci_device_id dscc4_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(dscc4_pci_tbl) = {
{ PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0,}
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 9bc2e3649157..40d724a8e020 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -528,7 +528,7 @@ static int fst_debug_mask = { FST_DEBUG };
/*
* PCI ID lookup table
*/
-static struct pci_device_id fst_pci_dev_id[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(fst_pci_dev_id) = {
{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T2P},
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 4b6f27e7c820..b27850377121 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -77,7 +77,7 @@
static int LMC_PKT_BUF_SZ = 1542;
-static struct pci_device_id lmc_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(lmc_pci_tbl) = {
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
PCI_VENDOR_ID_LMC, PCI_ANY_ID },
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index aec4d3955420..f4f1c00d0d23 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -251,7 +251,7 @@ static char rcsid[] =
#undef PC300_DEBUG_RX
#undef PC300_DEBUG_OTHER
-static struct pci_device_id cpc_pci_dev_id[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(cpc_pci_dev_id) = {
/* PC300/RSV or PC300/X21, 2 chan */
{0x120e, 0x300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x300},
/* PC300/RSV or PC300/X21, 1 chan */
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index 60ece54bdd94..c7ab3becd261 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -481,7 +481,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
-static struct pci_device_id pc300_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(pc300_pci_tbl) = {
{ PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_1, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_2, PCI_ANY_ID,
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index f1340faaf022..e2cff64a446a 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -417,7 +417,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
-static struct pci_device_id pci200_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(pci200_pci_tbl) = {
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 },
{ 0, }
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index daee8a0624ee..541c700dceef 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -814,7 +814,7 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
return 0;
}
-static struct pci_device_id wanxl_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(wanxl_pci_tbl) = {
{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID,
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 96a615fe09de..6cead321bc15 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -301,24 +301,15 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
/* Extract MAC address */
ddi = (void *) skb->data;
BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address));
- d_printf(2, dev, "GET DEVICE INFO: mac addr "
- "%02x:%02x:%02x:%02x:%02x:%02x\n",
- ddi->mac_address[0], ddi->mac_address[1],
- ddi->mac_address[2], ddi->mac_address[3],
- ddi->mac_address[4], ddi->mac_address[5]);
+ d_printf(2, dev, "GET DEVICE INFO: mac addr %pM\n",
+ ddi->mac_address);
if (!memcmp(net_dev->perm_addr, ddi->mac_address,
sizeof(ddi->mac_address)))
goto ok;
dev_warn(dev, "warning: device reports a different MAC address "
"to that of boot mode's\n");
- dev_warn(dev, "device reports %02x:%02x:%02x:%02x:%02x:%02x\n",
- ddi->mac_address[0], ddi->mac_address[1],
- ddi->mac_address[2], ddi->mac_address[3],
- ddi->mac_address[4], ddi->mac_address[5]);
- dev_warn(dev, "boot mode reported %02x:%02x:%02x:%02x:%02x:%02x\n",
- net_dev->perm_addr[0], net_dev->perm_addr[1],
- net_dev->perm_addr[2], net_dev->perm_addr[3],
- net_dev->perm_addr[4], net_dev->perm_addr[5]);
+ dev_warn(dev, "device reports %pM\n", ddi->mac_address);
+ dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr);
if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac)))
dev_err(dev, "device reports an invalid MAC address, "
"not updating\n");
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 64cdfeb299ca..e803a7dc6502 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -1041,21 +1041,14 @@ int i2400m_read_mac_addr(struct i2400m *i2400m)
dev_err(dev, "BM: read mac addr failed: %d\n", result);
goto error_read_mac;
}
- d_printf(2, dev,
- "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n",
- ack_buf.ack_pl[0], ack_buf.ack_pl[1],
- ack_buf.ack_pl[2], ack_buf.ack_pl[3],
- ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
+ d_printf(2, dev, "mac addr is %pM\n", ack_buf.ack_pl);
if (i2400m->bus_bm_mac_addr_impaired == 1) {
ack_buf.ack_pl[0] = 0x00;
ack_buf.ack_pl[1] = 0x16;
ack_buf.ack_pl[2] = 0xd3;
get_random_bytes(&ack_buf.ack_pl[3], 3);
dev_err(dev, "BM is MAC addr impaired, faking MAC addr to "
- "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n",
- ack_buf.ack_pl[0], ack_buf.ack_pl[1],
- ack_buf.ack_pl[2], ack_buf.ack_pl[3],
- ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
+ "mac addr is %pM\n", ack_buf.ack_pl);
result = 0;
}
net_dev->addr_len = ETH_ALEN;
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 5cc0f279417e..2d7c96d7e865 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -151,6 +151,7 @@ enum {
/* Device IDs */
USB_DEVICE_ID_I6050 = 0x0186,
+ USB_DEVICE_ID_I6050_2 = 0x0188,
};
@@ -234,6 +235,7 @@ struct i2400mu {
u8 rx_size_auto_shrink;
struct dentry *debugfs_dentry;
+ unsigned i6050:1; /* 1 if this is a 6050 based SKU */
};
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 3b48681f8a0d..98f4f8c5fb68 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -478,7 +478,16 @@ int i2400mu_probe(struct usb_interface *iface,
i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack;
i2400m->bus_bm_mac_addr_impaired = 0;
- if (id->idProduct == USB_DEVICE_ID_I6050) {
+ switch (id->idProduct) {
+ case USB_DEVICE_ID_I6050:
+ case USB_DEVICE_ID_I6050_2:
+ i2400mu->i6050 = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (i2400mu->i6050) {
i2400m->bus_fw_names = i2400mu_bus_fw_names_6050;
i2400mu->endpoint_cfg.bulk_out = 0;
i2400mu->endpoint_cfg.notification = 3;
@@ -719,6 +728,7 @@ int i2400mu_post_reset(struct usb_interface *iface)
static
struct usb_device_id i2400mu_id_table[] = {
{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
+ { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
{ USB_DEVICE(0x8086, 0x0181) },
{ USB_DEVICE(0x8086, 0x1403) },
{ USB_DEVICE(0x8086, 0x1405) },
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index e1f04bb437e3..e6ca3eb4c0d3 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -39,7 +39,7 @@ static unsigned int rx_ring_size __read_mostly = 16;
module_param(tx_ring_size, uint, 0);
module_param(rx_ring_size, uint, 0);
-static struct pci_device_id adm8211_pci_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(adm8211_pci_id_table) = {
/* ADMtek ADM8211 */
{ PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */
{ PCI_DEVICE(0x1200, 0x8201) }, /* ? */
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 4331d675fcc6..ef6b78da370f 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -57,7 +57,7 @@
#define DRV_NAME "airo"
#ifdef CONFIG_PCI
-static struct pci_device_id card_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
{ 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, },
{ 0x14b9, 0x4500, PCI_ANY_ID, PCI_ANY_ID },
{ 0x14b9, 0x4800, PCI_ANY_ID, PCI_ANY_ID, },
@@ -5254,11 +5254,7 @@ static int set_wep_key(struct airo_info *ai, u16 index, const char *key,
WepKeyRid wkr;
int rc;
- if (keylen == 0) {
- airo_print_err(ai->dev->name, "%s: key length to set was zero",
- __func__);
- return -1;
- }
+ WARN_ON(keylen == 0);
memset(&wkr, 0, sizeof(wkr));
wkr.len = cpu_to_le16(sizeof(wkr));
@@ -6405,11 +6401,7 @@ static int airo_set_encode(struct net_device *dev,
if (dwrq->length > MIN_KEY_SIZE)
key.len = MAX_KEY_SIZE;
else
- if (dwrq->length > 0)
- key.len = MIN_KEY_SIZE;
- else
- /* Disable the key */
- key.len = 0;
+ key.len = MIN_KEY_SIZE;
/* Check if the key is not marked as invalid */
if(!(dwrq->flags & IW_ENCODE_NOKEY)) {
/* Cleanup */
@@ -6590,12 +6582,22 @@ static int airo_set_encodeext(struct net_device *dev,
default:
return -EINVAL;
}
- /* Send the key to the card */
- rc = set_wep_key(local, idx, key.key, key.len, perm, 1);
- if (rc < 0) {
- airo_print_err(local->dev->name, "failed to set WEP key"
- " at index %d: %d.", idx, rc);
- return rc;
+ if (key.len == 0) {
+ rc = set_wep_tx_idx(local, idx, perm, 1);
+ if (rc < 0) {
+ airo_print_err(local->dev->name,
+ "failed to set WEP transmit index to %d: %d.",
+ idx, rc);
+ return rc;
+ }
+ } else {
+ rc = set_wep_key(local, idx, key.key, key.len, perm, 1);
+ if (rc < 0) {
+ airo_print_err(local->dev->name,
+ "failed to set WEP key at index %d: %d.",
+ idx, rc);
+ return rc;
+ }
}
}
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 9e05648356fe..71fc960814f0 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -74,7 +74,6 @@ struct ath_common;
struct ath_bus_ops {
void (*read_cachesize)(struct ath_common *common, int *csz);
- void (*cleanup)(struct ath_common *common);
bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
void (*bt_coex_prep)(struct ath_common *common);
};
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 6a2a96761111..ad4d446f0264 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -535,7 +535,7 @@ struct ath5k_txq_info {
u32 tqi_cbr_period; /* Constant bit rate period */
u32 tqi_cbr_overflow_limit;
u32 tqi_burst_time;
- u32 tqi_ready_time; /* Not used */
+ u32 tqi_ready_time; /* Time queue waits after an event */
};
/*
@@ -1063,6 +1063,7 @@ struct ath5k_hw {
u32 ah_cw_min;
u32 ah_cw_max;
u32 ah_limit_tx_retries;
+ u8 ah_coverage_class;
/* Antenna Control */
u32 ah_ant_ctl[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
@@ -1200,6 +1201,7 @@ extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
/* Protocol Control Unit Functions */
extern int ath5k_hw_set_opmode(struct ath5k_hw *ah);
+extern void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
/* BSSID Functions */
extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
extern void ath5k_hw_set_associd(struct ath5k_hw *ah);
@@ -1231,6 +1233,10 @@ extern int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout);
extern unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah);
extern int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout);
extern unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah);
+/* Clock rate related functions */
+unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
+unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
+unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah);
/* Key table (WEP) functions */
extern int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry);
@@ -1310,24 +1316,6 @@ extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
 * Functions used internally
*/
-/*
- * Translate usec to hw clock units
- * TODO: Half/quarter rate
- */
-static inline unsigned int ath5k_hw_htoclock(unsigned int usec, bool turbo)
-{
- return turbo ? (usec * 80) : (usec * 40);
-}
-
-/*
- * Translate hw clock units to usec
- * TODO: Half/quarter rate
- */
-static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo)
-{
- return turbo ? (clock / 80) : (clock / 40);
-}
-
static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah)
{
return &ah->common;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index fdfaf0f618f1..edb6c90e376f 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -83,7 +83,7 @@ MODULE_VERSION("0.6.0 (EXPERIMENTAL)");
/* Known PCI ids */
-static const struct pci_device_id ath5k_pci_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
{ PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
{ PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/
@@ -254,6 +254,8 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
u32 changes);
static void ath5k_sw_scan_start(struct ieee80211_hw *hw);
static void ath5k_sw_scan_complete(struct ieee80211_hw *hw);
+static void ath5k_set_coverage_class(struct ieee80211_hw *hw,
+ u8 coverage_class);
static const struct ieee80211_ops ath5k_hw_ops = {
.tx = ath5k_tx,
@@ -274,6 +276,7 @@ static const struct ieee80211_ops ath5k_hw_ops = {
.bss_info_changed = ath5k_bss_info_changed,
.sw_scan_start = ath5k_sw_scan_start,
.sw_scan_complete = ath5k_sw_scan_complete,
+ .set_coverage_class = ath5k_set_coverage_class,
};
/*
@@ -1513,7 +1516,8 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
if (ret)
- return ret;
+ goto err;
+
if (sc->opmode == NL80211_IFTYPE_AP ||
sc->opmode == NL80211_IFTYPE_MESH_POINT) {
/*
@@ -1540,10 +1544,25 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
if (ret) {
ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
"hardware queue!\n", __func__);
- return ret;
+ goto err;
}
+ ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */
+ if (ret)
+ goto err;
+
+ /* reconfigure cabq with ready time to 80% of beacon_interval */
+ ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
+ if (ret)
+ goto err;
+
+ qi.tqi_ready_time = (sc->bintval * 80) / 100;
+ ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
+ if (ret)
+ goto err;
- return ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */;
+ ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
+err:
+ return ret;
}
static void
@@ -3262,3 +3281,22 @@ static void ath5k_sw_scan_complete(struct ieee80211_hw *hw)
ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
AR5K_LED_ASSOC : AR5K_LED_INIT);
}
+
+/**
+ * ath5k_set_coverage_class - Set IEEE 802.11 coverage class
+ *
+ * @hw: struct ieee80211_hw pointer
+ * @coverage_class: IEEE 802.11 coverage class number
+ *
+ * Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for the given
+ * coverage class. The values are persistent; they are restored after device
+ * reset.
+ */
+static void ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
+{
+ struct ath5k_softc *sc = hw->priv;
+
+ mutex_lock(&sc->lock);
+ ath5k_hw_set_coverage_class(sc->ah, coverage_class);
+ mutex_unlock(&sc->lock);
+}
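The kernel-doc above describes the new mac80211 set_coverage_class hook; the timing math itself lives in pcu.c later in this patch and follows IEEE 802.11-2007 17.3.8.6: slot time grows by 3 us per coverage-class step, and the ACK/CTS timeouts are SIFS plus that slot time. A runnable sketch of the arithmetic, assuming 802.11a defaults (9 us slot, 16 us SIFS) and a hypothetical coverage class of 2:

#include <stdio.h>

int main(void)
{
        unsigned int coverage_class = 2;                  /* hypothetical value */
        unsigned int slot_time = 9 + 3 * coverage_class;  /* default slot + extension */
        unsigned int ack_timeout = 16 + slot_time;        /* SIFS + slot time */
        unsigned int cts_timeout = ack_timeout;

        printf("slot=%u us ack=%u us cts=%u us\n",
               slot_time, ack_timeout, cts_timeout);      /* 15, 31, 31 */
        return 0;
}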
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 5d1c8677f180..6a3f4da7fb48 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -97,7 +97,7 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
int ret;
u16 val;
- u32 cksum, offset;
+ u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX;
/*
* Read values from EEPROM and store them in the capability structure
@@ -116,12 +116,38 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
* Validate the checksum of the EEPROM date. There are some
* devices with invalid EEPROMs.
*/
- for (cksum = 0, offset = 0; offset < AR5K_EEPROM_INFO_MAX; offset++) {
+ AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_UPPER, val);
+ if (val) {
+ eep_max = (val & AR5K_EEPROM_SIZE_UPPER_MASK) <<
+ AR5K_EEPROM_SIZE_ENDLOC_SHIFT;
+ AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_LOWER, val);
+ eep_max = (eep_max | val) - AR5K_EEPROM_INFO_BASE;
+
+ /*
+ * Fail safe check to prevent stupid loops due
+ * to busted EEPROMs. XXX: This value is likely too
+ * big still, waiting on a better value.
+ */
+ if (eep_max > (3 * AR5K_EEPROM_INFO_MAX)) {
+ ATH5K_ERR(ah->ah_sc, "Invalid max custom EEPROM size: "
+ "%d (0x%04x) max expected: %d (0x%04x)\n",
+ eep_max, eep_max,
+ 3 * AR5K_EEPROM_INFO_MAX,
+ 3 * AR5K_EEPROM_INFO_MAX);
+ return -EIO;
+ }
+ }
+
+ for (cksum = 0, offset = 0; offset < eep_max; offset++) {
AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val);
cksum ^= val;
}
if (cksum != AR5K_EEPROM_INFO_CKSUM) {
- ATH5K_ERR(ah->ah_sc, "Invalid EEPROM checksum 0x%04x\n", cksum);
+ ATH5K_ERR(ah->ah_sc, "Invalid EEPROM "
+ "checksum: 0x%04x eep_max: 0x%04x (%s)\n",
+ cksum, eep_max,
+ eep_max == AR5K_EEPROM_INFO_MAX ?
+ "default size" : "custom size");
return -EIO;
}
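The header-checksum loop above now runs over a size read from the EEPROM itself (on AR531x-style flash parts) rather than the fixed AR5K_EEPROM_INFO_MAX, with a sanity cap at three times the default to avoid looping over a busted size word. A minimal sketch of that size calculation, using the masks added in eeprom.h below; the upper/lower words here are hypothetical:

#include <stdio.h>
#include <stdint.h>

#define AR5K_EEPROM_INFO_BASE           0x00c0
#define AR5K_EEPROM_INFO_MAX            (0x400 - AR5K_EEPROM_INFO_BASE)
#define AR5K_EEPROM_SIZE_UPPER_MASK     0xfff0
#define AR5K_EEPROM_SIZE_ENDLOC_SHIFT   12

int main(void)
{
        uint32_t upper = 0x0000, lower = 0x0000;   /* hypothetical EEPROM words */
        uint32_t eep_max = AR5K_EEPROM_INFO_MAX;   /* default size */

        if (upper) {
                eep_max = (upper & AR5K_EEPROM_SIZE_UPPER_MASK)
                                << AR5K_EEPROM_SIZE_ENDLOC_SHIFT;
                eep_max = (eep_max | lower) - AR5K_EEPROM_INFO_BASE;
                if (eep_max > 3 * AR5K_EEPROM_INFO_MAX)
                        return 1;   /* reject busted size info */
        }
        printf("checksum words to scan: %u\n", eep_max);   /* 832 with defaults */
        return 0;
}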
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index 0123f3521a0b..473a483bb9c3 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -37,6 +37,14 @@
#define AR5K_EEPROM_RFKILL_POLARITY_S 1
#define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */
+
+/* FLASH(EEPROM) Defines for AR531X chips */
+#define AR5K_EEPROM_SIZE_LOWER 0x1b /* size info -- lower */
+#define AR5K_EEPROM_SIZE_UPPER 0x1c /* size info -- upper */
+#define AR5K_EEPROM_SIZE_UPPER_MASK 0xfff0
+#define AR5K_EEPROM_SIZE_UPPER_SHIFT 4
+#define AR5K_EEPROM_SIZE_ENDLOC_SHIFT 12
+
#define AR5K_EEPROM_CHECKSUM 0x00c0 /* EEPROM checksum */
#define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */
#define AR5K_EEPROM_INFO_MAX (0x400 - AR5K_EEPROM_INFO_BASE)
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index 60f547503d75..67aa52e9bf94 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -77,6 +77,8 @@ static const struct pci_device_id ath5k_led_devices[] = {
{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) },
/* HP Compaq C700 (nitrousnrg@gmail.com) */
{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) },
+ /* LiteOn AR5BXB63 (magooz@salug.it) */
+ { ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) },
/* IBM-specific AR5212 (all others) */
{ PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5212_IBM), ATH_LED(0, 0) },
/* Dell Vostro A860 (shahar@shahar-or.co.il) */
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 64fc1eb9b6d9..aefe84f9c04b 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -187,8 +187,8 @@ unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
- AR5K_TIME_OUT), AR5K_TIME_OUT_ACK), ah->ah_turbo);
+ return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
+ AR5K_TIME_OUT), AR5K_TIME_OUT_ACK));
}
/**
@@ -200,12 +200,12 @@ unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
ATH5K_TRACE(ah->ah_sc);
- if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK),
- ah->ah_turbo) <= timeout)
+ if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
+ <= timeout)
return -EINVAL;
AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK,
- ath5k_hw_htoclock(timeout, ah->ah_turbo));
+ ath5k_hw_htoclock(ah, timeout));
return 0;
}
@@ -218,8 +218,8 @@ int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
- AR5K_TIME_OUT), AR5K_TIME_OUT_CTS), ah->ah_turbo);
+ return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
+ AR5K_TIME_OUT), AR5K_TIME_OUT_CTS));
}
/**
@@ -231,17 +231,97 @@ unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
ATH5K_TRACE(ah->ah_sc);
- if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS),
- ah->ah_turbo) <= timeout)
+ if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
+ <= timeout)
return -EINVAL;
AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS,
- ath5k_hw_htoclock(timeout, ah->ah_turbo));
+ ath5k_hw_htoclock(ah, timeout));
return 0;
}
/**
+ * ath5k_hw_htoclock - Translate usec to hw clock units
+ *
+ * @ah: The &struct ath5k_hw
+ * @usec: value in microseconds
+ */
+unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
+{
+ return usec * ath5k_hw_get_clockrate(ah);
+}
+
+/**
+ * ath5k_hw_clocktoh - Translate hw clock units to usec
+ *
+ * @ah: The &struct ath5k_hw
+ * @clock: value in hw clock units
+ */
+unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
+{
+ return clock / ath5k_hw_get_clockrate(ah);
+}
+
+/**
+ * ath5k_hw_get_clockrate - Get the clock rate for current mode
+ *
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+ int clock;
+
+ if (channel->hw_value & CHANNEL_5GHZ)
+ clock = 40; /* 802.11a */
+ else if (channel->hw_value & CHANNEL_CCK)
+ clock = 22; /* 802.11b */
+ else
+ clock = 44; /* 802.11g */
+
+ /* Clock rate in turbo modes is twice the normal rate */
+ if (channel->hw_value & CHANNEL_TURBO)
+ clock *= 2;
+
+ return clock;
+}
+
+/**
+ * ath5k_hw_get_default_slottime - Get the default slot time for current mode
+ *
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+
+ if (channel->hw_value & CHANNEL_TURBO)
+ return 6; /* both turbo modes */
+
+ if (channel->hw_value & CHANNEL_CCK)
+ return 20; /* 802.11b */
+
+ return 9; /* 802.11 a/g */
+}
+
+/**
+ * ath5k_hw_get_default_sifs - Get the default SIFS for current mode
+ *
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+
+ if (channel->hw_value & CHANNEL_TURBO)
+ return 8; /* both turbo modes */
+
+ if (channel->hw_value & CHANNEL_5GHZ)
+ return 16; /* 802.11a */
+
+ return 10; /* 802.11 b/g */
+}
+
+/**
* ath5k_hw_set_lladdr - Set station id
*
* @ah: The &struct ath5k_hw
@@ -1050,3 +1130,24 @@ int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
return 0;
}
+/**
+ * ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class
+ *
+ * @ah: The &struct ath5k_hw
+ * @coverage_class: IEEE 802.11 coverage class number
+ *
+ * Sets slot time, ACK timeout and CTS timeout for given coverage class.
+ */
+void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
+{
+ /* As defined by IEEE 802.11-2007 17.3.8.6 */
+ int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
+ int ack_timeout = ath5k_hw_get_default_sifs(ah) + slot_time;
+ int cts_timeout = ack_timeout;
+
+ ath5k_hw_set_slot_time(ah, slot_time);
+ ath5k_hw_set_ack_timeout(ah, ack_timeout);
+ ath5k_hw_set_cts_timeout(ah, cts_timeout);
+
+ ah->ah_coverage_class = coverage_class;
+}
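The helpers above replace the old turbo-only usec/clock translation: the clock rate now depends on the current channel (40 MHz for 802.11a, 22 for 802.11b, 44 for 802.11g, doubled in turbo modes), and the coverage-class timeouts are programmed through them. A runnable sketch of the round trip, assuming an 802.11a channel and the 31 us ACK timeout from the earlier coverage-class example:

#include <stdio.h>

int main(void)
{
        unsigned int clockrate = 40;        /* MHz, 802.11a; 22 for .11b, 44 for .11g */
        unsigned int ack_timeout_us = 31;   /* e.g. from the coverage-class math */

        unsigned int in_clocks = ack_timeout_us * clockrate;   /* htoclock */
        unsigned int back_in_us = in_clocks / clockrate;        /* clocktoh */

        printf("%u us -> %u clocks -> %u us\n",
               ack_timeout_us, in_clocks, back_in_us);           /* 31 -> 1240 -> 31 */
        return 0;
}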
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index eeebb9aef206..9122a8556f45 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -408,12 +408,13 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
break;
case AR5K_TX_QUEUE_CAB:
+ /* XXX: use BCN_SENT_GT, if we can figure out how */
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
- AR5K_QCU_MISC_FRSHED_BCN_SENT_GT |
+ AR5K_QCU_MISC_FRSHED_DBA_GT |
AR5K_QCU_MISC_CBREXP_DIS |
AR5K_QCU_MISC_CBREXP_BCN_DIS);
- ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
+ ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
(AR5K_TUNE_SW_BEACON_RESP -
AR5K_TUNE_DMA_BEACON_RESP) -
AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
@@ -520,12 +521,16 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
*/
unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
{
+ unsigned int slot_time_clock;
+
ATH5K_TRACE(ah->ah_sc);
+
if (ah->ah_version == AR5K_AR5210)
- return ath5k_hw_clocktoh(ath5k_hw_reg_read(ah,
- AR5K_SLOT_TIME) & 0xffff, ah->ah_turbo);
+ slot_time_clock = ath5k_hw_reg_read(ah, AR5K_SLOT_TIME);
else
- return ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT) & 0xffff;
+ slot_time_clock = ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT);
+
+ return ath5k_hw_clocktoh(ah, slot_time_clock & 0xffff);
}
/*
@@ -533,15 +538,17 @@ unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
*/
int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
{
+ u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
+
ATH5K_TRACE(ah->ah_sc);
- if (slot_time < AR5K_SLOT_TIME_9 || slot_time > AR5K_SLOT_TIME_MAX)
+
+ if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
return -EINVAL;
if (ah->ah_version == AR5K_AR5210)
- ath5k_hw_reg_write(ah, ath5k_hw_htoclock(slot_time,
- ah->ah_turbo), AR5K_SLOT_TIME);
+ ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
else
- ath5k_hw_reg_write(ah, slot_time, AR5K_DCU_GBL_IFS_SLOT);
+ ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);
return 0;
}
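The CAB queue setup above now uses the tqi_ready_time that beaconq_config() filled in (80% of the beacon interval) instead of the AR5K_TUNE_BEACON_INTERVAL constant. The beacon interval is kept in 802.11 time units, and 1 TU = 1024 us, so the multiply by 1024 in the register write converts the value to microseconds. A small sketch of that conversion with a hypothetical 100 TU beacon interval (the SWBA/DMA backoff constants subtracted in the driver are left out):

#include <stdio.h>

int main(void)
{
        unsigned int bintval_tu = 100;                          /* hypothetical beacon interval */
        unsigned int ready_time_tu = (bintval_tu * 80) / 100;   /* 80%, as in beaconq_config() */
        unsigned int ready_time_us = ready_time_tu * 1024;      /* TU -> microseconds */

        printf("cab ready time: %u TU = %u us\n", ready_time_tu, ready_time_us);
        return 0;
}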
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 62954fc77869..a35a7db0fc4c 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -60,12 +60,11 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
!(channel->hw_value & CHANNEL_OFDM));
/* Get coefficient
- * ALGO: coef = (5 * clock * carrier_freq) / 2)
+ * ALGO: coef = (5 * clock / carrier_freq) / 2
* we scale coef by shifting clock value by 24 for
* better precision since we use integers */
/* TODO: Half/quarter rate */
- clock = ath5k_hw_htoclock(1, channel->hw_value & CHANNEL_TURBO);
-
+ clock = (channel->hw_value & CHANNEL_TURBO) ? 80 : 40;
coef_scaled = ((5 * (clock << 24)) / 2) / channel->center_freq;
/* Get exponent
@@ -1317,6 +1316,10 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
/* Restore antenna mode */
ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
+ /* Restore slot time and ACK timeouts */
+ if (ah->ah_coverage_class > 0)
+ ath5k_hw_set_coverage_class(ah, ah->ah_coverage_class);
+
/*
* Configure QCUs/DCUs
*/
@@ -1371,8 +1374,9 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
* Set clocks to 32KHz operation and use an
* external 32KHz crystal when sleeping if one
* exists */
- if (ah->ah_version == AR5K_AR5212)
- ath5k_hw_set_sleep_clock(ah, true);
+ if (ah->ah_version == AR5K_AR5212 &&
+ ah->ah_op_mode != NL80211_IFTYPE_AP)
+ ath5k_hw_set_sleep_clock(ah, true);
/*
* Disable beacons and reset the register
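The OFDM-timing hunk above drops the turbo-flag variant of ath5k_hw_htoclock() and hard-codes the 40/80 MHz base clock, keeping the fixed-point coefficient coef = (5 * clock / carrier_freq) / 2, scaled by 2^24 for integer precision. A runnable sketch of that scaling for a hypothetical 5180 MHz channel on a 40 MHz clock:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Reproduce the fixed-point scaling used for the OFDM timing
         * coefficient; channel frequency is hypothetical. */
        uint32_t clock = 40;          /* MHz, non-turbo */
        uint32_t center_freq = 5180;  /* MHz */
        uint32_t coef_scaled = ((5 * (clock << 24)) / 2) / center_freq;

        printf("coef_scaled = %u\n", coef_scaled);   /* ~323884, i.e. ~0.0193 * 2^24 */
        return 0;
}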
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 03a1106ad725..5774cea23a3b 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -25,7 +25,7 @@ config ATH9K
config ATH9K_DEBUGFS
bool "Atheros ath9k debugging"
- depends on ATH9K
+ depends on ATH9K && DEBUG_FS
---help---
Say Y, if you need access to ath9k's statistics for
interrupts, rate control, etc.
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 4985b2b1b0a9..6b50d5eb9ec3 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -1,4 +1,6 @@
ath9k-y += beacon.o \
+ gpio.o \
+ init.o \
main.o \
recv.o \
xmit.o \
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 329e6bc137ab..ca4994f13151 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -27,12 +27,6 @@ static void ath_ahb_read_cachesize(struct ath_common *common, int *csz)
*csz = L1_CACHE_BYTES >> 2;
}
-static void ath_ahb_cleanup(struct ath_common *common)
-{
- struct ath_softc *sc = (struct ath_softc *)common->priv;
- iounmap(sc->mem);
-}
-
static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
struct ath_softc *sc = (struct ath_softc *)common->priv;
@@ -54,8 +48,6 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
static struct ath_bus_ops ath_ahb_bus_ops = {
.read_cachesize = ath_ahb_read_cachesize,
- .cleanup = ath_ahb_cleanup,
-
.eeprom_read = ath_ahb_eeprom_read,
};
@@ -121,16 +113,19 @@ static int ath_ahb_probe(struct platform_device *pdev)
sc->mem = mem;
sc->irq = irq;
- ret = ath_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops);
+ /* Will be cleared in ath9k_start() */
+ sc->sc_flags |= SC_OP_INVALID;
+
+ ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
- dev_err(&pdev->dev, "failed to initialize device\n");
+ dev_err(&pdev->dev, "request_irq failed\n");
goto err_free_hw;
}
- ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
+ ret = ath9k_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops);
if (ret) {
- dev_err(&pdev->dev, "request_irq failed\n");
- goto err_detach;
+ dev_err(&pdev->dev, "failed to initialize device\n");
+ goto err_irq;
}
ah = sc->sc_ah;
@@ -143,8 +138,8 @@ static int ath_ahb_probe(struct platform_device *pdev)
return 0;
- err_detach:
- ath_detach(sc);
+ err_irq:
+ free_irq(irq, sc);
err_free_hw:
ieee80211_free_hw(hw);
platform_set_drvdata(pdev, NULL);
@@ -161,8 +156,12 @@ static int ath_ahb_remove(struct platform_device *pdev)
if (hw) {
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ void __iomem *mem = sc->mem;
- ath_cleanup(sc);
+ ath9k_deinit_device(sc);
+ free_irq(sc->irq, sc);
+ ieee80211_free_hw(sc->hw);
+ iounmap(mem);
platform_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 9f1f523e02eb..0ea340fd071c 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -33,11 +33,11 @@ struct ath_node;
/* Macro to expand scalars to 64-bit objects */
-#define ito64(x) (sizeof(x) == 8) ? \
+#define ito64(x) (sizeof(x) == 1) ? \
(((unsigned long long int)(x)) & (0xff)) : \
- (sizeof(x) == 16) ? \
+ (sizeof(x) == 2) ? \
(((unsigned long long int)(x)) & 0xffff) : \
- ((sizeof(x) == 32) ? \
+ ((sizeof(x) == 4) ? \
(((unsigned long long int)(x)) & 0xffffffff) : \
(unsigned long long int)(x))
@@ -341,6 +341,12 @@ int ath_beaconq_config(struct ath_softc *sc);
#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
+void ath_ani_calibrate(unsigned long data);
+
+/**********/
+/* BTCOEX */
+/**********/
+
/* Defines the BT AR_BT_COEX_WGHT used */
enum ath_stomp_type {
ATH_BTCOEX_NO_STOMP,
@@ -358,9 +364,14 @@ struct ath_btcoex {
int bt_stomp_type; /* Types of BT stomping */
u32 btcoex_no_stomp; /* in usec */
u32 btcoex_period; /* in usec */
+ u32 btscan_no_stomp; /* in usec */
struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
};
+int ath_init_btcoex_timer(struct ath_softc *sc);
+void ath9k_btcoex_timer_resume(struct ath_softc *sc);
+void ath9k_btcoex_timer_pause(struct ath_softc *sc);
+
/********************/
/* LED Control */
/********************/
@@ -385,6 +396,9 @@ struct ath_led {
bool registered;
};
+void ath_init_leds(struct ath_softc *sc);
+void ath_deinit_leds(struct ath_softc *sc);
+
/********************/
/* Main driver core */
/********************/
@@ -403,26 +417,29 @@ struct ath_led {
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
#define ATH_RATE_DUMMY_MARKER 0
-#define SC_OP_INVALID BIT(0)
-#define SC_OP_BEACONS BIT(1)
-#define SC_OP_RXAGGR BIT(2)
-#define SC_OP_TXAGGR BIT(3)
-#define SC_OP_FULL_RESET BIT(4)
-#define SC_OP_PREAMBLE_SHORT BIT(5)
-#define SC_OP_PROTECT_ENABLE BIT(6)
-#define SC_OP_RXFLUSH BIT(7)
-#define SC_OP_LED_ASSOCIATED BIT(8)
-#define SC_OP_WAIT_FOR_BEACON BIT(12)
-#define SC_OP_LED_ON BIT(13)
-#define SC_OP_SCANNING BIT(14)
-#define SC_OP_TSF_RESET BIT(15)
-#define SC_OP_WAIT_FOR_CAB BIT(16)
-#define SC_OP_WAIT_FOR_PSPOLL_DATA BIT(17)
-#define SC_OP_WAIT_FOR_TX_ACK BIT(18)
-#define SC_OP_BEACON_SYNC BIT(19)
-#define SC_OP_BT_PRIORITY_DETECTED BIT(21)
-#define SC_OP_NULLFUNC_COMPLETED BIT(22)
-#define SC_OP_PS_ENABLED BIT(23)
+#define SC_OP_INVALID BIT(0)
+#define SC_OP_BEACONS BIT(1)
+#define SC_OP_RXAGGR BIT(2)
+#define SC_OP_TXAGGR BIT(3)
+#define SC_OP_FULL_RESET BIT(4)
+#define SC_OP_PREAMBLE_SHORT BIT(5)
+#define SC_OP_PROTECT_ENABLE BIT(6)
+#define SC_OP_RXFLUSH BIT(7)
+#define SC_OP_LED_ASSOCIATED BIT(8)
+#define SC_OP_LED_ON BIT(9)
+#define SC_OP_SCANNING BIT(10)
+#define SC_OP_TSF_RESET BIT(11)
+#define SC_OP_BT_PRIORITY_DETECTED BIT(12)
+#define SC_OP_BT_SCAN BIT(13)
+
+/* Powersave flags */
+#define PS_WAIT_FOR_BEACON BIT(0)
+#define PS_WAIT_FOR_CAB BIT(1)
+#define PS_WAIT_FOR_PSPOLL_DATA BIT(2)
+#define PS_WAIT_FOR_TX_ACK BIT(3)
+#define PS_BEACON_SYNC BIT(4)
+#define PS_NULLFUNC_COMPLETED BIT(5)
+#define PS_ENABLED BIT(6)
struct ath_wiphy;
struct ath_rate_table;
@@ -458,10 +475,12 @@ struct ath_softc {
u32 intrstatus;
u32 sc_flags; /* SC_OP_* */
+ u16 ps_flags; /* PS_* */
u16 curtxpow;
u8 nbcnvifs;
u16 nvifs;
bool ps_enabled;
+ bool ps_idle;
unsigned long ps_usecount;
enum ath9k_int imask;
@@ -508,6 +527,7 @@ struct ath_wiphy {
int chan_is_ht;
};
+void ath9k_tasklet(unsigned long data);
int ath_reset(struct ath_softc *sc, bool retry_tx);
int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
@@ -518,21 +538,16 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
common->bus_ops->read_cachesize(common, csz);
}
-static inline void ath_bus_cleanup(struct ath_common *common)
-{
- common->bus_ops->cleanup(common);
-}
-
extern struct ieee80211_ops ath9k_ops;
+extern int modparam_nohwcrypt;
irqreturn_t ath_isr(int irq, void *dev);
-void ath_cleanup(struct ath_softc *sc);
-int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
+int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
const struct ath_bus_ops *bus_ops);
-void ath_detach(struct ath_softc *sc);
+void ath9k_deinit_device(struct ath_softc *sc);
const char *ath_mac_bb_name(u32 mac_bb_version);
const char *ath_rf_name(u16 rf_version);
-void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
+void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
struct ath9k_channel *ichan);
void ath_update_chainmask(struct ath_softc *sc, int is_ht);
@@ -541,6 +556,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
+bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode);
#ifdef CONFIG_PCI
int ath_pci_init(void);
@@ -582,4 +598,8 @@ void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
+
+void ath_start_rfkill_poll(struct ath_softc *sc);
+extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+
#endif /* ATH9K_H */
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 1660ef17aaf5..422454fe4ff0 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -480,7 +480,8 @@ void ath_beacon_tasklet(unsigned long data)
sc->beacon.updateslot = COMMIT; /* commit next beacon */
sc->beacon.slotupdate = slot;
} else if (sc->beacon.updateslot == COMMIT && sc->beacon.slotupdate == slot) {
- ath9k_hw_setslottime(sc->sc_ah, sc->beacon.slottime);
+ ah->slottime = sc->beacon.slottime;
+ ath9k_hw_init_global_settings(ah);
sc->beacon.updateslot = OK;
}
if (bfaddr != 0) {
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 1ba31a73317c..1ee5a15ccbb1 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -25,10 +25,12 @@
#define ATH_BTCOEX_DEF_BT_PERIOD 45
#define ATH_BTCOEX_DEF_DUTY_CYCLE 55
+#define ATH_BTCOEX_BTSCAN_DUTY_CYCLE 90
#define ATH_BTCOEX_BMISS_THRESH 50
#define ATH_BT_PRIORITY_TIME_THRESHOLD 1000 /* ms */
#define ATH_BT_CNT_THRESHOLD 3
+#define ATH_BT_CNT_SCAN_THRESHOLD 15
enum ath_btcoex_scheme {
ATH_BTCOEX_CFG_NONE,
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 592f1b70f55a..42d2a506845a 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -75,17 +75,24 @@ static const struct file_operations fops_debug = {
#endif
+#define DMA_BUF_LEN 1024
+
static ssize_t read_file_dma(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
struct ath_hw *ah = sc->sc_ah;
- char buf[1024];
+ char *buf;
+ int retval;
unsigned int len = 0;
u32 val[ATH9K_NUM_DMA_DEBUG_REGS];
int i, qcuOffset = 0, dcuOffset = 0;
u32 *qcuBase = &val[0], *dcuBase = &val[4];
+ buf = kmalloc(DMA_BUF_LEN, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
ath9k_ps_wakeup(sc);
REG_WRITE_D(ah, AR_MACMISC,
@@ -93,20 +100,20 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
(AR_MACMISC_MISC_OBS_BUS_1 <<
AR_MACMISC_MISC_OBS_BUS_MSB_S)));
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"Raw DMA Debug values:\n");
for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
if (i % 4 == 0)
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32)));
- len += snprintf(buf + len, sizeof(buf) - len, "%d: %08x ",
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
i, val[i]);
}
- len += snprintf(buf + len, sizeof(buf) - len, "\n\n");
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) {
@@ -120,7 +127,7 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
dcuBase++;
}
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"%2d %2x %1x %2x %2x\n",
i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
(*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
@@ -128,35 +135,37 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
(*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
}
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"qcu_stitch state: %2x qcu_fetch state: %2x\n",
(val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"qcu_complete state: %2x dcu_complete state: %2x\n",
(val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"dcu_arb state: %2x dcu_fp state: %2x\n",
(val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
(val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
(val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
(val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
- len += snprintf(buf + len, sizeof(buf) - len, "pcu observe: 0x%x \n",
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x \n",
REG_READ_D(ah, AR_OBS_BUS_1));
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"AR_CR: 0x%x \n", REG_READ_D(ah, AR_CR));
ath9k_ps_restore(sc);
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return retval;
}
static const struct file_operations fops_dma = {
@@ -580,6 +589,116 @@ static const struct file_operations fops_xmit = {
.owner = THIS_MODULE
};
+static ssize_t read_file_recv(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+#define PHY_ERR(s, p) \
+ len += snprintf(buf + len, size - len, "%18s : %10u\n", s, \
+ sc->debug.stats.rxstats.phy_err_stats[p]);
+
+ struct ath_softc *sc = file->private_data;
+ char *buf;
+ unsigned int len = 0, size = 1152;
+ ssize_t retval = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return 0;
+
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "CRC ERR",
+ sc->debug.stats.rxstats.crc_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "DECRYPT CRC ERR",
+ sc->debug.stats.rxstats.decrypt_crc_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "PHY ERR",
+ sc->debug.stats.rxstats.phy_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "MIC ERR",
+ sc->debug.stats.rxstats.mic_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "PRE-DELIM CRC ERR",
+ sc->debug.stats.rxstats.pre_delim_crc_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "POST-DELIM CRC ERR",
+ sc->debug.stats.rxstats.post_delim_crc_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "DECRYPT BUSY ERR",
+ sc->debug.stats.rxstats.decrypt_busy_err);
+
+ PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
+ PHY_ERR("TIMING", ATH9K_PHYERR_TIMING);
+ PHY_ERR("PARITY", ATH9K_PHYERR_PARITY);
+ PHY_ERR("RATE", ATH9K_PHYERR_RATE);
+ PHY_ERR("LENGTH", ATH9K_PHYERR_LENGTH);
+ PHY_ERR("RADAR", ATH9K_PHYERR_RADAR);
+ PHY_ERR("SERVICE", ATH9K_PHYERR_SERVICE);
+ PHY_ERR("TOR", ATH9K_PHYERR_TOR);
+ PHY_ERR("OFDM-TIMING", ATH9K_PHYERR_OFDM_TIMING);
+ PHY_ERR("OFDM-SIGNAL-PARITY", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
+ PHY_ERR("OFDM-RATE", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
+ PHY_ERR("OFDM-LENGTH", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
+ PHY_ERR("OFDM-POWER-DROP", ATH9K_PHYERR_OFDM_POWER_DROP);
+ PHY_ERR("OFDM-SERVICE", ATH9K_PHYERR_OFDM_SERVICE);
+ PHY_ERR("OFDM-RESTART", ATH9K_PHYERR_OFDM_RESTART);
+ PHY_ERR("FALSE-RADAR-EXT", ATH9K_PHYERR_FALSE_RADAR_EXT);
+ PHY_ERR("CCK-TIMING", ATH9K_PHYERR_CCK_TIMING);
+ PHY_ERR("CCK-HEADER-CRC", ATH9K_PHYERR_CCK_HEADER_CRC);
+ PHY_ERR("CCK-RATE", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
+ PHY_ERR("CCK-SERVICE", ATH9K_PHYERR_CCK_SERVICE);
+ PHY_ERR("CCK-RESTART", ATH9K_PHYERR_CCK_RESTART);
+ PHY_ERR("CCK-LENGTH", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
+ PHY_ERR("CCK-POWER-DROP", ATH9K_PHYERR_CCK_POWER_DROP);
+ PHY_ERR("HT-CRC", ATH9K_PHYERR_HT_CRC_ERROR);
+ PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
+ PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL);
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+
+#undef PHY_ERR
+}
+
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf)
+{
+#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
+#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
+
+ struct ath_desc *ds = bf->bf_desc;
+ u32 phyerr;
+
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
+ RX_STAT_INC(crc_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT)
+ RX_STAT_INC(decrypt_crc_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC)
+ RX_STAT_INC(mic_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_PRE)
+ RX_STAT_INC(pre_delim_crc_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_POST)
+ RX_STAT_INC(post_delim_crc_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RX_DECRYPT_BUSY)
+ RX_STAT_INC(decrypt_busy_err);
+
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
+ RX_STAT_INC(phy_err);
+ phyerr = ds->ds_rxstat.rs_phyerr & 0x24;
+ RX_PHY_ERR_INC(phyerr);
+ }
+
+#undef RX_STAT_INC
+#undef RX_PHY_ERR_INC
+}
+
+static const struct file_operations fops_recv = {
+ .read = read_file_recv,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -632,6 +751,13 @@ int ath9k_init_debug(struct ath_hw *ah)
if (!sc->debug.debugfs_xmit)
goto err;
+ sc->debug.debugfs_recv = debugfs_create_file("recv",
+ S_IRUSR,
+ sc->debug.debugfs_phy,
+ sc, &fops_recv);
+ if (!sc->debug.debugfs_recv)
+ goto err;
+
return 0;
err:
ath9k_exit_debug(ah);
@@ -643,6 +769,7 @@ void ath9k_exit_debug(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
struct ath_softc *sc = (struct ath_softc *) common->priv;
+ debugfs_remove(sc->debug.debugfs_recv);
debugfs_remove(sc->debug.debugfs_xmit);
debugfs_remove(sc->debug.debugfs_wiphy);
debugfs_remove(sc->debug.debugfs_rcstat);
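The debug.c changes above move read_file_dma() off a 1 KB on-stack buffer onto a kmalloc'd one and add read_file_recv() with the same shape: build the text into a heap buffer, hand it to simple_read_from_buffer(), then free it. A minimal sketch of that pattern follows; the function and field names are hypothetical, and note that the drivers above choose to return 0 rather than -ENOMEM when the allocation fails:

static ssize_t read_file_example(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        char *buf;
        unsigned int len = 0, size = 1024;
        ssize_t retval;

        buf = kzalloc(size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* Append formatted statistics, always bounded by the remaining space. */
        len += snprintf(buf + len, size - len, "%18s : %10u\n", "EXAMPLE", 42u);

        retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
        kfree(buf);
        return retval;
}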
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 536663e3ee11..86780e68b31e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -116,10 +116,35 @@ struct ath_tx_stats {
u32 delim_underrun;
};
+/**
+ * struct ath_rx_stats - RX Statistics
+ * @crc_err: No. of frames with incorrect CRC value
+ * @decrypt_crc_err: No. of frames whose CRC check failed after
+ decryption process completed
+ * @phy_err: No. of frames whose reception failed because the PHY
+ encountered an error
+ * @mic_err: No. of frames that failed TKIP MIC verification
+ * @pre_delim_crc_err: Pre-Frame delimiter CRC error detections
+ * @post_delim_crc_err: Post-Frame delimiter CRC error detections
+ * @decrypt_busy_err: Decryption interruptions counter
+ * @phy_err_stats: Individual PHY error statistics
+ */
+struct ath_rx_stats {
+ u32 crc_err;
+ u32 decrypt_crc_err;
+ u32 phy_err;
+ u32 mic_err;
+ u32 pre_delim_crc_err;
+ u32 post_delim_crc_err;
+ u32 decrypt_busy_err;
+ u32 phy_err_stats[ATH9K_PHYERR_MAX];
+};
+
struct ath_stats {
struct ath_interrupt_stats istats;
struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
+ struct ath_rx_stats rxstats;
};
struct ath9k_debug {
@@ -130,6 +155,7 @@ struct ath9k_debug {
struct dentry *debugfs_rcstat;
struct dentry *debugfs_wiphy;
struct dentry *debugfs_xmit;
+ struct dentry *debugfs_recv;
struct ath_stats stats;
};
@@ -142,6 +168,7 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
void ath_debug_stat_rc(struct ath_softc *sc, int final_rate);
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf);
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf);
void ath_debug_stat_retries(struct ath_softc *sc, int rix,
int xretries, int retries, u8 per);
@@ -181,6 +208,11 @@ static inline void ath_debug_stat_tx(struct ath_softc *sc,
{
}
+static inline void ath_debug_stat_rx(struct ath_softc *sc,
+ struct ath_buf *bf)
+{
+}
+
static inline void ath_debug_stat_retries(struct ath_softc *sc, int rix,
int xretries, int retries, u8 per)
{
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
new file mode 100644
index 000000000000..deab8beb0680
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+/********************************/
+/* LED functions */
+/********************************/
+
+static void ath_led_blink_work(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc,
+ ath_led_blink_work.work);
+
+ if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
+ return;
+
+ if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
+ (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
+ else
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
+ (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
+
+ ieee80211_queue_delayed_work(sc->hw,
+ &sc->ath_led_blink_work,
+ (sc->sc_flags & SC_OP_LED_ON) ?
+ msecs_to_jiffies(sc->led_off_duration) :
+ msecs_to_jiffies(sc->led_on_duration));
+
+ sc->led_on_duration = sc->led_on_cnt ?
+ max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
+ ATH_LED_ON_DURATION_IDLE;
+ sc->led_off_duration = sc->led_off_cnt ?
+ max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
+ ATH_LED_OFF_DURATION_IDLE;
+ sc->led_on_cnt = sc->led_off_cnt = 0;
+ if (sc->sc_flags & SC_OP_LED_ON)
+ sc->sc_flags &= ~SC_OP_LED_ON;
+ else
+ sc->sc_flags |= SC_OP_LED_ON;
+}
+
+static void ath_led_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
+ struct ath_softc *sc = led->sc;
+
+ switch (brightness) {
+ case LED_OFF:
+ if (led->led_type == ATH_LED_ASSOC ||
+ led->led_type == ATH_LED_RADIO) {
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
+ (led->led_type == ATH_LED_RADIO));
+ sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
+ if (led->led_type == ATH_LED_RADIO)
+ sc->sc_flags &= ~SC_OP_LED_ON;
+ } else {
+ sc->led_off_cnt++;
+ }
+ break;
+ case LED_FULL:
+ if (led->led_type == ATH_LED_ASSOC) {
+ sc->sc_flags |= SC_OP_LED_ASSOCIATED;
+ ieee80211_queue_delayed_work(sc->hw,
+ &sc->ath_led_blink_work, 0);
+ } else if (led->led_type == ATH_LED_RADIO) {
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
+ sc->sc_flags |= SC_OP_LED_ON;
+ } else {
+ sc->led_on_cnt++;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
+ char *trigger)
+{
+ int ret;
+
+ led->sc = sc;
+ led->led_cdev.name = led->name;
+ led->led_cdev.default_trigger = trigger;
+ led->led_cdev.brightness_set = ath_led_brightness;
+
+ ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
+ if (ret)
+ ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
+ "Failed to register led:%s", led->name);
+ else
+ led->registered = 1;
+ return ret;
+}
+
+static void ath_unregister_led(struct ath_led *led)
+{
+ if (led->registered) {
+ led_classdev_unregister(&led->led_cdev);
+ led->registered = 0;
+ }
+}
+
+void ath_deinit_leds(struct ath_softc *sc)
+{
+ ath_unregister_led(&sc->assoc_led);
+ sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
+ ath_unregister_led(&sc->tx_led);
+ ath_unregister_led(&sc->rx_led);
+ ath_unregister_led(&sc->radio_led);
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
+}
+
+void ath_init_leds(struct ath_softc *sc)
+{
+ char *trigger;
+ int ret;
+
+ if (AR_SREV_9287(sc->sc_ah))
+ sc->sc_ah->led_pin = ATH_LED_PIN_9287;
+ else
+ sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
+
+ /* Configure gpio 1 for output */
+ ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ /* LED off, active low */
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
+
+ INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
+
+ trigger = ieee80211_get_radio_led_name(sc->hw);
+ snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
+ "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
+ ret = ath_register_led(sc, &sc->radio_led, trigger);
+ sc->radio_led.led_type = ATH_LED_RADIO;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_assoc_led_name(sc->hw);
+ snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
+ "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
+ ret = ath_register_led(sc, &sc->assoc_led, trigger);
+ sc->assoc_led.led_type = ATH_LED_ASSOC;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_tx_led_name(sc->hw);
+ snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
+ "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
+ ret = ath_register_led(sc, &sc->tx_led, trigger);
+ sc->tx_led.led_type = ATH_LED_TX;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_rx_led_name(sc->hw);
+ snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
+ "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
+ ret = ath_register_led(sc, &sc->rx_led, trigger);
+ sc->rx_led.led_type = ATH_LED_RX;
+ if (ret)
+ goto fail;
+
+ return;
+
+fail:
+ cancel_delayed_work_sync(&sc->ath_led_blink_work);
+ ath_deinit_leds(sc);
+}
+
+/*******************/
+/* Rfkill */
+/*******************/
+
+static bool ath_is_rfkill_set(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
+ ah->rfkill_polarity;
+}
+
+void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
+{
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+ bool blocked = !!ath_is_rfkill_set(sc);
+
+ wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+}
+
+void ath_start_rfkill_poll(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+ wiphy_rfkill_start_polling(sc->hw->wiphy);
+}
+
+/******************/
+/* BTCOEX */
+/******************/
+
+/*
+ * Detect whether there is any priority BT traffic.
+ */
+static void ath_detect_bt_priority(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
+ btcoex->bt_priority_cnt++;
+
+ if (time_after(jiffies, btcoex->bt_priority_time +
+ msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
+ sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
+ /* Detect if colocated bt started scanning */
+ if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
+ ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
+				  "BT scan detected\n");
+ sc->sc_flags |= (SC_OP_BT_SCAN |
+ SC_OP_BT_PRIORITY_DETECTED);
+ } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
+ ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
+				  "BT priority traffic detected\n");
+ sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
+ }
+
+ btcoex->bt_priority_cnt = 0;
+ btcoex->bt_priority_time = jiffies;
+ }
+}
+
+/*
+ * Configures appropriate weight based on stomp type.
+ */
+static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
+ enum ath_stomp_type stomp_type)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ switch (stomp_type) {
+ case ATH_BTCOEX_STOMP_ALL:
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_ALL_WLAN_WGHT);
+ break;
+ case ATH_BTCOEX_STOMP_LOW:
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_LOW_WLAN_WGHT);
+ break;
+ case ATH_BTCOEX_STOMP_NONE:
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_NONE_WLAN_WGHT);
+ break;
+ default:
+ ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ "Invalid Stomptype\n");
+ break;
+ }
+
+ ath9k_hw_btcoex_enable(ah);
+}
+
+static void ath9k_gen_timer_start(struct ath_hw *ah,
+ struct ath_gen_timer *timer,
+ u32 timer_next,
+ u32 timer_period)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+
+ ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
+
+ if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
+ ath9k_hw_set_interrupts(ah, 0);
+ sc->imask |= ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah, sc->imask);
+ }
+}
+
+static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
+
+ ath9k_hw_gen_timer_stop(ah, timer);
+
+ /* if no timer is enabled, turn off interrupt mask */
+ if (timer_table->timer_mask.val == 0) {
+ ath9k_hw_set_interrupts(ah, 0);
+ sc->imask &= ~ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah, sc->imask);
+ }
+}
+
+/*
+ * This is the master BT coex timer, which runs every 45 ms.
+ * BT traffic is given priority during 55% of this period,
+ * while WLAN gets the remaining 45%.
+ */
+static void ath_btcoex_period_timer(unsigned long data)
+{
+ struct ath_softc *sc = (struct ath_softc *) data;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ u32 timer_period;
+ bool is_btscan;
+
+ ath_detect_bt_priority(sc);
+
+ is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
+
+ spin_lock_bh(&btcoex->btcoex_lock);
+
+ ath9k_btcoex_bt_stomp(sc, is_btscan ? ATH_BTCOEX_STOMP_ALL :
+ btcoex->bt_stomp_type);
+
+ spin_unlock_bh(&btcoex->btcoex_lock);
+
+ if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
+ if (btcoex->hw_timer_enabled)
+ ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
+
+ timer_period = is_btscan ? btcoex->btscan_no_stomp :
+ btcoex->btcoex_no_stomp;
+ ath9k_gen_timer_start(ah,
+ btcoex->no_stomp_timer,
+ (ath9k_hw_gettsf32(ah) +
+ timer_period), timer_period * 10);
+ btcoex->hw_timer_enabled = true;
+ }
+
+ mod_timer(&btcoex->period_timer, jiffies +
+ msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
+}
+
+/*
+ * Generic TSF-based hardware timer which configures the weight
+ * registers to time-slice between WLAN and BT traffic.
+ */
+static void ath_btcoex_no_stomp_timer(void *arg)
+{
+ struct ath_softc *sc = (struct ath_softc *)arg;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+		  "no stomp timer running\n");
+
+ spin_lock_bh(&btcoex->btcoex_lock);
+
+ if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
+ ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
+ else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
+ ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);
+
+ spin_unlock_bh(&btcoex->btcoex_lock);
+}
+
+int ath_init_btcoex_timer(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+
+ btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
+ btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
+ btcoex->btcoex_period / 100;
+ btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
+ btcoex->btcoex_period / 100;
+
+ setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
+ (unsigned long) sc);
+
+ spin_lock_init(&btcoex->btcoex_lock);
+
+ btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
+ ath_btcoex_no_stomp_timer,
+ ath_btcoex_no_stomp_timer,
+ (void *) sc, AR_FIRST_NDP_TIMER);
+
+ if (!btcoex->no_stomp_timer)
+ return -ENOMEM;
+
+ return 0;
+}
+
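As a worked example of the timing split described above, here is a standalone sketch of the arithmetic in ath_init_btcoex_timer(). The 45 ms period comes from the comment above; the 55% duty cycle is an assumption for illustration (ATH_BTCOEX_DEF_DUTY_CYCLE is defined elsewhere in the driver).

#include <stdio.h>

int main(void)
{
	unsigned int bt_period_ms = 45;	/* per the comment above        */
	unsigned int duty_cycle   = 55;	/* assumed default, in percent  */

	unsigned int btcoex_period   = bt_period_ms * 1000;	/* 45000 us */
	unsigned int btcoex_no_stomp = (100 - duty_cycle) * btcoex_period / 100;

	/* 20250 us of every 45 ms period run with the relaxed weights. */
	printf("period=%u us, no-stomp window=%u us\n",
	       btcoex_period, btcoex_no_stomp);
	return 0;
}
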
+/*
+ * (Re)start btcoex timers
+ */
+void ath9k_btcoex_timer_resume(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_hw *ah = sc->sc_ah;
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+		  "Starting btcoex timers\n");
+
+ /* make sure duty cycle timer is also stopped when resuming */
+ if (btcoex->hw_timer_enabled)
+ ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
+
+ btcoex->bt_priority_cnt = 0;
+ btcoex->bt_priority_time = jiffies;
+ sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
+
+ mod_timer(&btcoex->period_timer, jiffies);
+}
+
+
+/*
+ * Pause btcoex timer and bt duty cycle timer
+ */
+void ath9k_btcoex_timer_pause(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_hw *ah = sc->sc_ah;
+
+ del_timer_sync(&btcoex->period_timer);
+
+ if (btcoex->hw_timer_enabled)
+ ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
+
+ btcoex->hw_timer_enabled = false;
+}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 9474f9f6d400..f15fee76a4e2 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -52,28 +52,6 @@ module_exit(ath9k_exit);
/* Helper Functions */
/********************/
-static u32 ath9k_hw_mac_usec(struct ath_hw *ah, u32 clks)
-{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
-
- if (!ah->curchan) /* should really check for CCK instead */
- return clks / ATH9K_CLOCK_RATE_CCK;
- if (conf->channel->band == IEEE80211_BAND_2GHZ)
- return clks / ATH9K_CLOCK_RATE_2GHZ_OFDM;
-
- return clks / ATH9K_CLOCK_RATE_5GHZ_OFDM;
-}
-
-static u32 ath9k_hw_mac_to_usec(struct ath_hw *ah, u32 clks)
-{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
-
- if (conf_is_ht40(conf))
- return ath9k_hw_mac_usec(ah, clks) / 2;
- else
- return ath9k_hw_mac_usec(ah, clks);
-}
-
static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
{
struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
@@ -356,7 +334,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.pcie_clock_req = 0;
ah->config.pcie_waen = 0;
ah->config.analog_shiftreg = 1;
- ah->config.ht_enable = 1;
ah->config.ofdm_trig_low = 200;
ah->config.ofdm_trig_high = 500;
ah->config.cck_trig_high = 200;
@@ -368,6 +345,11 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.spurchans[i][1] = AR_NO_SPUR;
}
+ if (ah->hw_version.devid != AR2427_DEVID_PCIE)
+ ah->config.ht_enable = 1;
+ else
+ ah->config.ht_enable = 0;
+
ah->config.rx_intr_mitigation = true;
/*
@@ -413,8 +395,6 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->beacon_interval = 100;
ah->enable_32kHz_clock = DONT_USE_32KHZ;
ah->slottime = (u32) -1;
- ah->acktimeout = (u32) -1;
- ah->ctstimeout = (u32) -1;
ah->globaltxtimeout = (u32) -1;
ah->power_mode = ATH9K_PM_UNDEFINED;
}
@@ -566,6 +546,7 @@ static bool ath9k_hw_devid_supported(u16 devid)
case AR5416_DEVID_AR9287_PCI:
case AR5416_DEVID_AR9287_PCIE:
case AR9271_USB:
+ case AR2427_DEVID_PCIE:
return true;
default:
break;
@@ -831,12 +812,11 @@ static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
}
}
-static void ath9k_hw_init_11a_eeprom_fix(struct ath_hw *ah)
+static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah)
{
u32 i, j;
- if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
- test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes)) {
+ if (ah->hw_version.devid == AR9280_DEVID_PCI) {
/* EEPROM Fixup */
for (i = 0; i < ah->iniModes.ia_rows; i++) {
@@ -956,7 +936,7 @@ int ath9k_hw_init(struct ath_hw *ah)
if (r)
return r;
- ath9k_hw_init_11a_eeprom_fix(ah);
+ ath9k_hw_init_eeprom_fix(ah);
r = ath9k_hw_init_macaddr(ah);
if (r) {
@@ -1180,34 +1160,25 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
}
}
-static bool ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
+static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
{
- if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
- "bad ack timeout %u\n", us);
- ah->acktimeout = (u32) -1;
- return false;
- } else {
- REG_RMW_FIELD(ah, AR_TIME_OUT,
- AR_TIME_OUT_ACK, ath9k_hw_mac_to_clks(ah, us));
- ah->acktimeout = us;
- return true;
- }
+ u32 val = ath9k_hw_mac_to_clks(ah, us);
+ val = min(val, (u32) 0xFFFF);
+ REG_WRITE(ah, AR_D_GBL_IFS_SLOT, val);
}
-static bool ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
+static void ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
{
- if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
- "bad cts timeout %u\n", us);
- ah->ctstimeout = (u32) -1;
- return false;
- } else {
- REG_RMW_FIELD(ah, AR_TIME_OUT,
- AR_TIME_OUT_CTS, ath9k_hw_mac_to_clks(ah, us));
- ah->ctstimeout = us;
- return true;
- }
+ u32 val = ath9k_hw_mac_to_clks(ah, us);
+ val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_ACK));
+ REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_ACK, val);
+}
+
+static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
+{
+ u32 val = ath9k_hw_mac_to_clks(ah, us);
+ val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_CTS));
+ REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_CTS, val);
}
static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
@@ -1224,25 +1195,37 @@ static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
}
}
-static void ath9k_hw_init_user_settings(struct ath_hw *ah)
+void ath9k_hw_init_global_settings(struct ath_hw *ah)
{
+ struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
+ int acktimeout;
+ int slottime;
+ int sifstime;
+
ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
ah->misc_mode);
if (ah->misc_mode != 0)
REG_WRITE(ah, AR_PCU_MISC,
REG_READ(ah, AR_PCU_MISC) | ah->misc_mode);
- if (ah->slottime != (u32) -1)
- ath9k_hw_setslottime(ah, ah->slottime);
- if (ah->acktimeout != (u32) -1)
- ath9k_hw_set_ack_timeout(ah, ah->acktimeout);
- if (ah->ctstimeout != (u32) -1)
- ath9k_hw_set_cts_timeout(ah, ah->ctstimeout);
+
+ if (conf->channel && conf->channel->band == IEEE80211_BAND_5GHZ)
+ sifstime = 16;
+ else
+ sifstime = 10;
+
+ /* As defined by IEEE 802.11-2007 17.3.8.6 */
+ slottime = ah->slottime + 3 * ah->coverage_class;
+ acktimeout = slottime + sifstime;
+ ath9k_hw_setslottime(ah, slottime);
+ ath9k_hw_set_ack_timeout(ah, acktimeout);
+ ath9k_hw_set_cts_timeout(ah, acktimeout);
if (ah->globaltxtimeout != (u32) -1)
ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout);
}
+EXPORT_SYMBOL(ath9k_hw_init_global_settings);
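
A quick worked example of the new coverage-class timeout math above (a sketch only; the 9 us slot time and coverage class 2 are assumed inputs, and per IEEE 802.11-2007 17.3.8.6 each coverage-class step adds 3 us of air propagation time):

#include <stdio.h>

int main(void)
{
	int base_slottime  = 9;		/* us, typical short slot (ah->slottime) */
	int coverage_class = 2;		/* assumed operator setting              */
	int sifstime       = 10;	/* us on 2.4 GHz; 16 us on 5 GHz         */

	int slottime   = base_slottime + 3 * coverage_class;	/* 15 us */
	int acktimeout = slottime + sifstime;			/* 25 us */

	printf("slot=%d us, ack/cts timeout=%d us\n", slottime, acktimeout);
	return 0;
}
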
-void ath9k_hw_detach(struct ath_hw *ah)
+void ath9k_hw_deinit(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -1260,7 +1243,7 @@ free_hw:
kfree(ah);
ah = NULL;
}
-EXPORT_SYMBOL(ath9k_hw_detach);
+EXPORT_SYMBOL(ath9k_hw_deinit);
/*******/
/* INI */
@@ -2061,7 +2044,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
ath9k_enable_rfkill(ah);
- ath9k_hw_init_user_settings(ah);
+ ath9k_hw_init_global_settings(ah);
if (AR_SREV_9287_12_OR_LATER(ah)) {
REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
@@ -3658,21 +3641,6 @@ u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp)
}
EXPORT_SYMBOL(ath9k_hw_extend_tsf);
-bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
-{
- if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
- "bad slot time %u\n", us);
- ah->slottime = (u32) -1;
- return false;
- } else {
- REG_WRITE(ah, AR_D_GBL_IFS_SLOT, ath9k_hw_mac_to_clks(ah, us));
- ah->slottime = us;
- return true;
- }
-}
-EXPORT_SYMBOL(ath9k_hw_setslottime);
-
void ath9k_hw_set11nmac2040(struct ath_hw *ah)
{
struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 8849450dc591..dbbf7ca5f97d 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -40,6 +40,7 @@
#define AR9280_DEVID_PCI 0x0029
#define AR9280_DEVID_PCIE 0x002a
#define AR9285_DEVID_PCIE 0x002b
+#define AR2427_DEVID_PCIE 0x002c
#define AR5416_AR9100_DEVID 0x000b
@@ -551,10 +552,9 @@ struct ath_hw {
u32 *bank6Temp;
int16_t txpower_indexoffset;
+ int coverage_class;
u32 beacon_interval;
u32 slottime;
- u32 acktimeout;
- u32 ctstimeout;
u32 globaltxtimeout;
/* ANI */
@@ -616,7 +616,7 @@ static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
/* Initialization, Detach, Reset */
const char *ath9k_hw_probe(u16 vendorid, u16 devid);
-void ath9k_hw_detach(struct ath_hw *ah);
+void ath9k_hw_deinit(struct ath_hw *ah);
int ath9k_hw_init(struct ath_hw *ah);
int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
bool bChannelChange);
@@ -668,7 +668,7 @@ void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
void ath9k_hw_reset_tsf(struct ath_hw *ah);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp);
-bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us);
+void ath9k_hw_init_global_settings(struct ath_hw *ah);
void ath9k_hw_set11nmac2040(struct ath_hw *ah);
void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
new file mode 100644
index 000000000000..4b5e54848683
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -0,0 +1,862 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+static char *dev_info = "ath9k";
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
+MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
+module_param_named(debug, ath9k_debug, uint, 0);
+MODULE_PARM_DESC(debug, "Debugging mask");
+
+int modparam_nohwcrypt;
+module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
+
+/* We use the hw_value as an index into our private channel structure */
+
+#define CHAN2G(_freq, _idx) { \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
+#define CHAN5G(_freq, _idx) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
+/* Some 2 GHz radios are actually tunable from 2312-2732 MHz
+ * in 5 MHz steps; to keep this table static we only support
+ * the channels for which we know we have calibration data
+ * on all cards. */
+static struct ieee80211_channel ath9k_2ghz_chantable[] = {
+ CHAN2G(2412, 0), /* Channel 1 */
+ CHAN2G(2417, 1), /* Channel 2 */
+ CHAN2G(2422, 2), /* Channel 3 */
+ CHAN2G(2427, 3), /* Channel 4 */
+ CHAN2G(2432, 4), /* Channel 5 */
+ CHAN2G(2437, 5), /* Channel 6 */
+ CHAN2G(2442, 6), /* Channel 7 */
+ CHAN2G(2447, 7), /* Channel 8 */
+ CHAN2G(2452, 8), /* Channel 9 */
+ CHAN2G(2457, 9), /* Channel 10 */
+ CHAN2G(2462, 10), /* Channel 11 */
+ CHAN2G(2467, 11), /* Channel 12 */
+ CHAN2G(2472, 12), /* Channel 13 */
+ CHAN2G(2484, 13), /* Channel 14 */
+};
+
+/* Some 5 GHz radios are actually tunable on XXXX-YYYY
+ * in 5 MHz steps; to keep this table static we only support
+ * the channels for which we know we have calibration data
+ * on all cards. */
+static struct ieee80211_channel ath9k_5ghz_chantable[] = {
+ /* _We_ call this UNII 1 */
+ CHAN5G(5180, 14), /* Channel 36 */
+ CHAN5G(5200, 15), /* Channel 40 */
+ CHAN5G(5220, 16), /* Channel 44 */
+ CHAN5G(5240, 17), /* Channel 48 */
+ /* _We_ call this UNII 2 */
+ CHAN5G(5260, 18), /* Channel 52 */
+ CHAN5G(5280, 19), /* Channel 56 */
+ CHAN5G(5300, 20), /* Channel 60 */
+ CHAN5G(5320, 21), /* Channel 64 */
+ /* _We_ call this "Middle band" */
+ CHAN5G(5500, 22), /* Channel 100 */
+ CHAN5G(5520, 23), /* Channel 104 */
+ CHAN5G(5540, 24), /* Channel 108 */
+ CHAN5G(5560, 25), /* Channel 112 */
+ CHAN5G(5580, 26), /* Channel 116 */
+ CHAN5G(5600, 27), /* Channel 120 */
+ CHAN5G(5620, 28), /* Channel 124 */
+ CHAN5G(5640, 29), /* Channel 128 */
+ CHAN5G(5660, 30), /* Channel 132 */
+ CHAN5G(5680, 31), /* Channel 136 */
+ CHAN5G(5700, 32), /* Channel 140 */
+ /* _We_ call this UNII 3 */
+ CHAN5G(5745, 33), /* Channel 149 */
+ CHAN5G(5765, 34), /* Channel 153 */
+ CHAN5G(5785, 35), /* Channel 157 */
+ CHAN5G(5805, 36), /* Channel 161 */
+ CHAN5G(5825, 37), /* Channel 165 */
+};
+
+/* Atheros hardware rate code addition for short preamble */
+#define SHPCHECK(__hw_rate, __flags) \
+ ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
+
+#define RATE(_bitrate, _hw_rate, _flags) { \
+ .bitrate = (_bitrate), \
+ .flags = (_flags), \
+ .hw_value = (_hw_rate), \
+ .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
+}
+
+static struct ieee80211_rate ath9k_legacy_rates[] = {
+ RATE(10, 0x1b, 0),
+ RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(60, 0x0b, 0),
+ RATE(90, 0x0f, 0),
+ RATE(120, 0x0a, 0),
+ RATE(180, 0x0e, 0),
+ RATE(240, 0x09, 0),
+ RATE(360, 0x0d, 0),
+ RATE(480, 0x08, 0),
+ RATE(540, 0x0c, 0),
+};
+
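For clarity, a small sketch of what SHPCHECK() in the RATE() macro above produces for the 2 Mb/s entry: the short-preamble hardware code is the long-preamble code with bit 0x04 set, and rates without the short-preamble flag get hw_value_short = 0.

#include <stdio.h>

int main(void)
{
	unsigned int hw_rate  = 0x1a;		/* 2 Mb/s, long preamble        */
	unsigned int hw_short = hw_rate | 0x04;	/* 0x1e, short-preamble variant */

	printf("hw_value=0x%02x hw_value_short=0x%02x\n", hw_rate, hw_short);
	return 0;
}
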
+static void ath9k_deinit_softc(struct ath_softc *sc);
+
+/*
+ * Reads and writes share the same lock. We do this to serialize register
+ * accesses on Atheros 802.11n PCI devices only, as the FIFO on these
+ * devices can only sanely accept two requests at a time.
+ */
+
+static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+
+ if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
+ unsigned long flags;
+ spin_lock_irqsave(&sc->sc_serial_rw, flags);
+ iowrite32(val, sc->mem + reg_offset);
+ spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
+ } else
+ iowrite32(val, sc->mem + reg_offset);
+}
+
+static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ u32 val;
+
+ if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
+ unsigned long flags;
+ spin_lock_irqsave(&sc->sc_serial_rw, flags);
+ val = ioread32(sc->mem + reg_offset);
+ spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
+ } else
+ val = ioread32(sc->mem + reg_offset);
+ return val;
+}
+
+static const struct ath_ops ath9k_common_ops = {
+ .read = ath9k_ioread32,
+ .write = ath9k_iowrite32,
+};
+
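The ops table above funnels every register access through one pair of callbacks, which is what makes the serializing spinlock effective. A simplified, self-contained sketch of that indirection (types and names here are stand-ins, not the driver's):

#include <stdint.h>
#include <stdio.h>

struct ex_ops {
	unsigned int (*read)(void *priv, uint32_t reg);
	void (*write)(void *priv, uint32_t val, uint32_t reg);
};

static unsigned int ex_read(void *priv, uint32_t reg)
{
	(void)priv;
	printf("read  reg 0x%04x\n", reg);
	return 0;
}

static void ex_write(void *priv, uint32_t val, uint32_t reg)
{
	(void)priv;
	printf("write reg 0x%04x = 0x%08x\n", reg, val);
}

int main(void)
{
	/* All accesses go through one place, so one lock there is enough. */
	const struct ex_ops ops = { .read = ex_read, .write = ex_write };

	ops.write(NULL, 0x1, 0x4028);
	ops.read(NULL, 0x4028);
	return 0;
}
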
+/**************************/
+/* Initialization */
+/**************************/
+
+static void setup_ht_cap(struct ath_softc *sc,
+ struct ieee80211_sta_ht_cap *ht_info)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ u8 tx_streams, rx_streams;
+
+ ht_info->ht_supported = true;
+ ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SM_PS |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_DSSSCCK40;
+
+ ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+
+ /* set up supported mcs set */
+ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+ tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
+ 1 : 2;
+ rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
+ 1 : 2;
+
+ if (tx_streams != rx_streams) {
+ ath_print(common, ATH_DBG_CONFIG,
+ "TX streams %d, RX streams: %d\n",
+ tx_streams, rx_streams);
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ ht_info->mcs.tx_params |= ((tx_streams - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ }
+
+ ht_info->mcs.rx_mask[0] = 0xff;
+ if (rx_streams >= 2)
+ ht_info->mcs.rx_mask[1] = 0xff;
+
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+}
+
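A brief worked example of the chainmask-to-stream calculation above: !(mask & (mask - 1)) is a power-of-two test, so a mask with a single bit set means one spatial stream and anything else counts as two. The masks below are made-up inputs.

#include <stdio.h>

static int ex_streams(unsigned int chainmask)
{
	return !(chainmask & (chainmask - 1)) ? 1 : 2;
}

int main(void)
{
	printf("mask 0x1 -> %d stream(s)\n", ex_streams(0x1));	/* 1 */
	printf("mask 0x3 -> %d stream(s)\n", ex_streams(0x3));	/* 2 */
	printf("mask 0x5 -> %d stream(s)\n", ex_streams(0x5));	/* 2 */
	return 0;
}
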
+static int ath9k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+ struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
+
+ return ath_reg_notifier_apply(wiphy, request, reg);
+}
+
+/*
+ * This function allocates both the DMA descriptor structure and the
+ * buffers it contains. These hold the descriptors used by the system.
+ */
+int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
+ struct list_head *head, const char *name,
+ int nbuf, int ndesc)
+{
+#define DS2PHYS(_dd, _ds) \
+ ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
+#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
+#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_desc *ds;
+ struct ath_buf *bf;
+ int i, bsize, error;
+
+ ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
+ name, nbuf, ndesc);
+
+ INIT_LIST_HEAD(head);
+ /* ath_desc must be a multiple of DWORDs */
+ if ((sizeof(struct ath_desc) % 4) != 0) {
+ ath_print(common, ATH_DBG_FATAL,
+ "ath_desc not DWORD aligned\n");
+ BUG_ON((sizeof(struct ath_desc) % 4) != 0);
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
+
+ /*
+ * Need additional DMA memory because we can't use
+ * descriptors that cross the 4K page boundary. Assume
+ * one skipped descriptor per 4K page.
+ */
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ u32 ndesc_skipped =
+ ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
+ u32 dma_len;
+
+ while (ndesc_skipped) {
+ dma_len = ndesc_skipped * sizeof(struct ath_desc);
+ dd->dd_desc_len += dma_len;
+
+ ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
+ };
+ }
+
+ /* allocate descriptors */
+ dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
+ &dd->dd_desc_paddr, GFP_KERNEL);
+ if (dd->dd_desc == NULL) {
+ error = -ENOMEM;
+ goto fail;
+ }
+ ds = dd->dd_desc;
+ ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
+ name, ds, (u32) dd->dd_desc_len,
+ ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
+
+ /* allocate buffers */
+ bsize = sizeof(struct ath_buf) * nbuf;
+ bf = kzalloc(bsize, GFP_KERNEL);
+ if (bf == NULL) {
+ error = -ENOMEM;
+ goto fail2;
+ }
+ dd->dd_bufptr = bf;
+
+ for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+
+ if (!(sc->sc_ah->caps.hw_caps &
+ ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ /*
+ * Skip descriptor addresses which can cause 4KB
+ * boundary crossing (addr + length) with a 32 dword
+ * descriptor fetch.
+ */
+ while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+ BUG_ON((caddr_t) bf->bf_desc >=
+ ((caddr_t) dd->dd_desc +
+ dd->dd_desc_len));
+
+ ds += ndesc;
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+ }
+ }
+ list_add_tail(&bf->list, head);
+ }
+ return 0;
+fail2:
+ dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
+ dd->dd_desc_paddr);
+fail:
+ memset(dd, 0, sizeof(*dd));
+ return error;
+#undef ATH_DESC_4KB_BOUND_CHECK
+#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
+#undef DS2PHYS
+}
+
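For reference, a worked example of the 4 KB boundary test used by ath_descdma_setup() above. The addresses are made up; the 0x80-byte window corresponds to the 32-dword descriptor fetch mentioned in the comment, so any offset above 0xF7F within a page would spill into the next one.

#include <stdio.h>

#define EX_4KB_BOUND_CHECK(daddr) ((((daddr) & 0xFFF) > 0xF7F) ? 1 : 0)

int main(void)
{
	unsigned long addrs[] = { 0x1234F00, 0x1234F80, 0x1234FFC };
	int i;

	for (i = 0; i < 3; i++)
		printf("0x%07lx: %s\n", addrs[i],
		       EX_4KB_BOUND_CHECK(addrs[i]) ? "skip" : "ok");
	/* prints: ok, skip, skip */
	return 0;
}
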
+static void ath9k_init_crypto(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int i = 0;
+
+ /* Get the hardware key cache size. */
+ common->keymax = sc->sc_ah->caps.keycache_size;
+ if (common->keymax > ATH_KEYMAX) {
+ ath_print(common, ATH_DBG_ANY,
+ "Warning, using only %u entries in %u key cache\n",
+ ATH_KEYMAX, common->keymax);
+ common->keymax = ATH_KEYMAX;
+ }
+
+ /*
+ * Reset the key cache since some parts do not
+ * reset the contents on initial power up.
+ */
+ for (i = 0; i < common->keymax; i++)
+ ath9k_hw_keyreset(sc->sc_ah, (u16) i);
+
+ if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_TKIP, NULL)) {
+ /*
+ * Whether we should enable h/w TKIP MIC.
+ * XXX: if we don't support WME TKIP MIC, then we wouldn't
+ * report WMM capable, so it's always safe to turn on
+ * TKIP MIC in this case.
+ */
+ ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
+ }
+
+	/*
+	 * Check whether the separate key cache entries
+	 * are required to handle both tx+rx MIC keys.
+	 * With split MIC keys the number of stations is limited
+	 * to 27; otherwise it is 59.
+	 */
+ if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_TKIP, NULL)
+ && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_MIC, NULL)
+ && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT,
+ 0, NULL))
+ common->splitmic = 1;
+
+ /* turn on mcast key search if possible */
+ if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
+ (void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH,
+ 1, 1, NULL);
+
+}
+
+static int ath9k_init_btcoex(struct ath_softc *sc)
+{
+ int r, qnum;
+
+ switch (sc->sc_ah->btcoex_hw.scheme) {
+ case ATH_BTCOEX_CFG_NONE:
+ break;
+ case ATH_BTCOEX_CFG_2WIRE:
+ ath9k_hw_btcoex_init_2wire(sc->sc_ah);
+ break;
+ case ATH_BTCOEX_CFG_3WIRE:
+ ath9k_hw_btcoex_init_3wire(sc->sc_ah);
+ r = ath_init_btcoex_timer(sc);
+ if (r)
+ return -1;
+ qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
+ ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
+ sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return 0;
+}
+
+static int ath9k_init_queues(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int i = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
+ sc->tx.hwq_map[i] = -1;
+
+ sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
+ if (sc->beacon.beaconq == -1) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup a beacon xmit queue\n");
+ goto err;
+ }
+
+ sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
+ if (sc->beacon.cabq == NULL) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup CAB xmit queue\n");
+ goto err;
+ }
+
+ sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
+ ath_cabq_update(sc);
+
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for BK traffic\n");
+ goto err;
+ }
+
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for BE traffic\n");
+ goto err;
+ }
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for VI traffic\n");
+ goto err;
+ }
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for VO traffic\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->tx.txq[i]);
+
+ return -EIO;
+}
+
+static void ath9k_init_channels_rates(struct ath_softc *sc)
+{
+ if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
+ sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
+ sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
+ sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
+ ARRAY_SIZE(ath9k_2ghz_chantable);
+ sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
+ sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates);
+ }
+
+ if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
+ sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
+ sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
+ sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
+ ARRAY_SIZE(ath9k_5ghz_chantable);
+ sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
+ ath9k_legacy_rates + 4;
+ sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates) - 4;
+ }
+}
+
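A short note on the pointer arithmetic above: ath9k_legacy_rates + 4 skips the four CCK entries, so the 5 GHz band only advertises the eight OFDM rates. A stand-in sketch of the same offsetting:

#include <stdio.h>

int main(void)
{
	/* Stand-in for ath9k_legacy_rates, in units of 100 kb/s. */
	int rates[] = { 10, 20, 55, 110,			/* CCK, 2 GHz only */
			60, 90, 120, 180, 240, 360, 480, 540 };	/* OFDM            */
	int *ofdm = rates + 4;
	int n_ofdm = (int)(sizeof(rates) / sizeof(rates[0])) - 4;

	printf("5 GHz: %d rates, first = %d (x100 kb/s)\n", n_ofdm, ofdm[0]);
	return 0;
}
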
+static void ath9k_init_misc(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int i = 0;
+
+ common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
+ setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
+
+ sc->config.txpowlimit = ATH_TXPOWER_MAX;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ sc->sc_flags |= SC_OP_TXAGGR;
+ sc->sc_flags |= SC_OP_RXAGGR;
+ }
+
+ common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
+ common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
+
+ ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
+ sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
+ memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
+
+ sc->beacon.slottime = ATH9K_SLOT_TIME_9;
+
+ for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
+ sc->beacon.bslot[i] = NULL;
+ sc->beacon.bslot_aphy[i] = NULL;
+ }
+}
+
+static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
+ const struct ath_bus_ops *bus_ops)
+{
+ struct ath_hw *ah = NULL;
+ struct ath_common *common;
+ int ret = 0, i;
+ int csz = 0;
+
+ ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
+ if (!ah)
+ return -ENOMEM;
+
+ ah->hw_version.devid = devid;
+ ah->hw_version.subsysid = subsysid;
+ sc->sc_ah = ah;
+
+ common = ath9k_hw_common(ah);
+ common->ops = &ath9k_common_ops;
+ common->bus_ops = bus_ops;
+ common->ah = ah;
+ common->hw = sc->hw;
+ common->priv = sc;
+ common->debug_mask = ath9k_debug;
+
+ spin_lock_init(&sc->wiphy_lock);
+ spin_lock_init(&sc->sc_resetlock);
+ spin_lock_init(&sc->sc_serial_rw);
+ spin_lock_init(&sc->sc_pm_lock);
+ mutex_init(&sc->mutex);
+ tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
+ tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
+ (unsigned long)sc);
+
+ /*
+ * Cache line size is used to size and align various
+ * structures used to communicate with the hardware.
+ */
+ ath_read_cachesize(common, &csz);
+ common->cachelsz = csz << 2; /* convert to bytes */
+
+ ret = ath9k_hw_init(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to initialize hardware; "
+ "initialization status: %d\n", ret);
+ goto err_hw;
+ }
+
+ ret = ath9k_init_debug(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to create debugfs files\n");
+ goto err_debug;
+ }
+
+ ret = ath9k_init_queues(sc);
+ if (ret)
+ goto err_queues;
+
+ ret = ath9k_init_btcoex(sc);
+ if (ret)
+ goto err_btcoex;
+
+ ath9k_init_crypto(sc);
+ ath9k_init_channels_rates(sc);
+ ath9k_init_misc(sc);
+
+ return 0;
+
+err_btcoex:
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->tx.txq[i]);
+err_queues:
+ ath9k_exit_debug(ah);
+err_debug:
+ ath9k_hw_deinit(ah);
+err_hw:
+ tasklet_kill(&sc->intr_tq);
+ tasklet_kill(&sc->bcon_tasklet);
+
+ kfree(ah);
+ sc->sc_ah = NULL;
+
+ return ret;
+}
+
+void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_PS_NULLFUNC_STACK |
+ IEEE80211_HW_SPECTRUM_MGMT;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
+ hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+
+ if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
+ hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->queues = 4;
+ hw->max_rates = 4;
+ hw->channel_change_time = 5000;
+ hw->max_listen_interval = 10;
+ hw->max_rate_tries = 10;
+ hw->sta_data_size = sizeof(struct ath_node);
+ hw->vif_data_size = sizeof(struct ath_vif);
+
+ hw->rate_control_algorithm = "ath9k_rate_control";
+
+ if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &sc->sbands[IEEE80211_BAND_2GHZ];
+ if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &sc->sbands[IEEE80211_BAND_5GHZ];
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
+ setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+ if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
+ setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
+ }
+
+ SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
+}
+
+int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
+ const struct ath_bus_ops *bus_ops)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_common *common;
+ struct ath_hw *ah;
+ int error = 0;
+ struct ath_regulatory *reg;
+
+ /* Bring up device */
+ error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
+ if (error != 0)
+ goto error_init;
+
+ ah = sc->sc_ah;
+ common = ath9k_hw_common(ah);
+ ath9k_set_hw_capab(sc, hw);
+
+ /* Initialize regulatory */
+ error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
+ ath9k_reg_notifier);
+ if (error)
+ goto error_regd;
+
+ reg = &common->regulatory;
+
+ /* Setup TX DMA */
+ error = ath_tx_init(sc, ATH_TXBUF);
+ if (error != 0)
+ goto error_tx;
+
+ /* Setup RX DMA */
+ error = ath_rx_init(sc, ATH_RXBUF);
+ if (error != 0)
+ goto error_rx;
+
+ /* Register with mac80211 */
+ error = ieee80211_register_hw(hw);
+ if (error)
+ goto error_register;
+
+ /* Handle world regulatory */
+ if (!ath_is_world_regd(reg)) {
+ error = regulatory_hint(hw->wiphy, reg->alpha2);
+ if (error)
+ goto error_world;
+ }
+
+ INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
+ INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
+ sc->wiphy_scheduler_int = msecs_to_jiffies(500);
+
+ ath_init_leds(sc);
+ ath_start_rfkill_poll(sc);
+
+ return 0;
+
+error_world:
+ ieee80211_unregister_hw(hw);
+error_register:
+ ath_rx_cleanup(sc);
+error_rx:
+ ath_tx_cleanup(sc);
+error_tx:
+ /* Nothing */
+error_regd:
+ ath9k_deinit_softc(sc);
+error_init:
+ return error;
+}
+
+/*****************************/
+/* De-Initialization */
+/*****************************/
+
+static void ath9k_deinit_softc(struct ath_softc *sc)
+{
+ int i = 0;
+
+ if ((sc->btcoex.no_stomp_timer) &&
+ sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+ ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
+
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->tx.txq[i]);
+
+ ath9k_exit_debug(sc->sc_ah);
+ ath9k_hw_deinit(sc->sc_ah);
+
+ tasklet_kill(&sc->intr_tq);
+ tasklet_kill(&sc->bcon_tasklet);
+}
+
+void ath9k_deinit_device(struct ath_softc *sc)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ int i = 0;
+
+ ath9k_ps_wakeup(sc);
+
+ wiphy_rfkill_stop_polling(sc->hw->wiphy);
+ ath_deinit_leds(sc);
+
+ for (i = 0; i < sc->num_sec_wiphy; i++) {
+ struct ath_wiphy *aphy = sc->sec_wiphy[i];
+ if (aphy == NULL)
+ continue;
+ sc->sec_wiphy[i] = NULL;
+ ieee80211_unregister_hw(aphy->hw);
+ ieee80211_free_hw(aphy->hw);
+ }
+ kfree(sc->sec_wiphy);
+
+ ieee80211_unregister_hw(hw);
+ ath_rx_cleanup(sc);
+ ath_tx_cleanup(sc);
+ ath9k_deinit_softc(sc);
+}
+
+void ath_descdma_cleanup(struct ath_softc *sc,
+ struct ath_descdma *dd,
+ struct list_head *head)
+{
+ dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
+ dd->dd_desc_paddr);
+
+ INIT_LIST_HEAD(head);
+ kfree(dd->dd_bufptr);
+ memset(dd, 0, sizeof(*dd));
+}
+
+/************************/
+/* Module Hooks */
+/************************/
+
+static int __init ath9k_init(void)
+{
+ int error;
+
+ /* Register rate control algorithm */
+ error = ath_rate_control_register();
+ if (error != 0) {
+ printk(KERN_ERR
+ "ath9k: Unable to register rate control "
+ "algorithm: %d\n",
+ error);
+ goto err_out;
+ }
+
+ error = ath9k_debug_create_root();
+ if (error) {
+ printk(KERN_ERR
+ "ath9k: Unable to create debugfs root: %d\n",
+ error);
+ goto err_rate_unregister;
+ }
+
+ error = ath_pci_init();
+ if (error < 0) {
+ printk(KERN_ERR
+ "ath9k: No PCI devices found, driver not installed.\n");
+ error = -ENODEV;
+ goto err_remove_root;
+ }
+
+ error = ath_ahb_init();
+ if (error < 0) {
+ error = -ENODEV;
+ goto err_pci_exit;
+ }
+
+ return 0;
+
+ err_pci_exit:
+ ath_pci_exit();
+
+ err_remove_root:
+ ath9k_debug_remove_root();
+ err_rate_unregister:
+ ath_rate_control_unregister();
+ err_out:
+ return error;
+}
+module_init(ath9k_init);
+
+static void __exit ath9k_exit(void)
+{
+ ath_ahb_exit();
+ ath_pci_exit();
+ ath9k_debug_remove_root();
+ ath_rate_control_unregister();
+ printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
+}
+module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index e185479e295e..29851e6376a9 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -167,6 +167,40 @@ struct ath_rx_status {
#define ATH9K_RXKEYIX_INVALID ((u8)-1)
#define ATH9K_TXKEYIX_INVALID ((u32)-1)
+enum ath9k_phyerr {
+ ATH9K_PHYERR_UNDERRUN = 0, /* Transmit underrun */
+ ATH9K_PHYERR_TIMING = 1, /* Timing error */
+ ATH9K_PHYERR_PARITY = 2, /* Illegal parity */
+ ATH9K_PHYERR_RATE = 3, /* Illegal rate */
+ ATH9K_PHYERR_LENGTH = 4, /* Illegal length */
+ ATH9K_PHYERR_RADAR = 5, /* Radar detect */
+ ATH9K_PHYERR_SERVICE = 6, /* Illegal service */
+ ATH9K_PHYERR_TOR = 7, /* Transmit override receive */
+
+ ATH9K_PHYERR_OFDM_TIMING = 17,
+ ATH9K_PHYERR_OFDM_SIGNAL_PARITY = 18,
+ ATH9K_PHYERR_OFDM_RATE_ILLEGAL = 19,
+ ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL = 20,
+ ATH9K_PHYERR_OFDM_POWER_DROP = 21,
+ ATH9K_PHYERR_OFDM_SERVICE = 22,
+ ATH9K_PHYERR_OFDM_RESTART = 23,
+ ATH9K_PHYERR_FALSE_RADAR_EXT = 24,
+
+ ATH9K_PHYERR_CCK_TIMING = 25,
+ ATH9K_PHYERR_CCK_HEADER_CRC = 26,
+ ATH9K_PHYERR_CCK_RATE_ILLEGAL = 27,
+ ATH9K_PHYERR_CCK_SERVICE = 30,
+ ATH9K_PHYERR_CCK_RESTART = 31,
+ ATH9K_PHYERR_CCK_LENGTH_ILLEGAL = 32,
+ ATH9K_PHYERR_CCK_POWER_DROP = 33,
+
+ ATH9K_PHYERR_HT_CRC_ERROR = 34,
+ ATH9K_PHYERR_HT_LENGTH_ILLEGAL = 35,
+ ATH9K_PHYERR_HT_RATE_ILLEGAL = 36,
+
+ ATH9K_PHYERR_MAX = 37,
+};
+
struct ath_desc {
u32 ds_link;
u32 ds_data;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 79fbbda15493..6796d5cdc293 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -18,118 +18,6 @@
#include "ath9k.h"
#include "btcoex.h"
-static char *dev_info = "ath9k";
-
-MODULE_AUTHOR("Atheros Communications");
-MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
-MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
-MODULE_LICENSE("Dual BSD/GPL");
-
-static int modparam_nohwcrypt;
-module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
-MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
-
-static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
-module_param_named(debug, ath9k_debug, uint, 0);
-MODULE_PARM_DESC(debug, "Debugging mask");
-
-/* We use the hw_value as an index into our private channel structure */
-
-#define CHAN2G(_freq, _idx) { \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 20, \
-}
-
-#define CHAN5G(_freq, _idx) { \
- .band = IEEE80211_BAND_5GHZ, \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 20, \
-}
-
-/* Some 2 GHz radios are actually tunable on 2312-2732
- * on 5 MHz steps, we support the channels which we know
- * we have calibration data for all cards though to make
- * this static */
-static struct ieee80211_channel ath9k_2ghz_chantable[] = {
- CHAN2G(2412, 0), /* Channel 1 */
- CHAN2G(2417, 1), /* Channel 2 */
- CHAN2G(2422, 2), /* Channel 3 */
- CHAN2G(2427, 3), /* Channel 4 */
- CHAN2G(2432, 4), /* Channel 5 */
- CHAN2G(2437, 5), /* Channel 6 */
- CHAN2G(2442, 6), /* Channel 7 */
- CHAN2G(2447, 7), /* Channel 8 */
- CHAN2G(2452, 8), /* Channel 9 */
- CHAN2G(2457, 9), /* Channel 10 */
- CHAN2G(2462, 10), /* Channel 11 */
- CHAN2G(2467, 11), /* Channel 12 */
- CHAN2G(2472, 12), /* Channel 13 */
- CHAN2G(2484, 13), /* Channel 14 */
-};
-
-/* Some 5 GHz radios are actually tunable on XXXX-YYYY
- * on 5 MHz steps, we support the channels which we know
- * we have calibration data for all cards though to make
- * this static */
-static struct ieee80211_channel ath9k_5ghz_chantable[] = {
- /* _We_ call this UNII 1 */
- CHAN5G(5180, 14), /* Channel 36 */
- CHAN5G(5200, 15), /* Channel 40 */
- CHAN5G(5220, 16), /* Channel 44 */
- CHAN5G(5240, 17), /* Channel 48 */
- /* _We_ call this UNII 2 */
- CHAN5G(5260, 18), /* Channel 52 */
- CHAN5G(5280, 19), /* Channel 56 */
- CHAN5G(5300, 20), /* Channel 60 */
- CHAN5G(5320, 21), /* Channel 64 */
- /* _We_ call this "Middle band" */
- CHAN5G(5500, 22), /* Channel 100 */
- CHAN5G(5520, 23), /* Channel 104 */
- CHAN5G(5540, 24), /* Channel 108 */
- CHAN5G(5560, 25), /* Channel 112 */
- CHAN5G(5580, 26), /* Channel 116 */
- CHAN5G(5600, 27), /* Channel 120 */
- CHAN5G(5620, 28), /* Channel 124 */
- CHAN5G(5640, 29), /* Channel 128 */
- CHAN5G(5660, 30), /* Channel 132 */
- CHAN5G(5680, 31), /* Channel 136 */
- CHAN5G(5700, 32), /* Channel 140 */
- /* _We_ call this UNII 3 */
- CHAN5G(5745, 33), /* Channel 149 */
- CHAN5G(5765, 34), /* Channel 153 */
- CHAN5G(5785, 35), /* Channel 157 */
- CHAN5G(5805, 36), /* Channel 161 */
- CHAN5G(5825, 37), /* Channel 165 */
-};
-
-/* Atheros hardware rate code addition for short premble */
-#define SHPCHECK(__hw_rate, __flags) \
- ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
-
-#define RATE(_bitrate, _hw_rate, _flags) { \
- .bitrate = (_bitrate), \
- .flags = (_flags), \
- .hw_value = (_hw_rate), \
- .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
-}
-
-static struct ieee80211_rate ath9k_legacy_rates[] = {
- RATE(10, 0x1b, 0),
- RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(60, 0x0b, 0),
- RATE(90, 0x0f, 0),
- RATE(120, 0x0a, 0),
- RATE(180, 0x0e, 0),
- RATE(240, 0x09, 0),
- RATE(360, 0x0d, 0),
- RATE(480, 0x08, 0),
- RATE(540, 0x0c, 0),
-};
-
static void ath_cache_conf_rate(struct ath_softc *sc,
struct ieee80211_conf *conf)
{
@@ -221,7 +109,7 @@ static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
return channel;
}
-static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
+bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
{
unsigned long flags;
bool ret;
@@ -255,11 +143,13 @@ void ath9k_ps_restore(struct ath_softc *sc)
if (--sc->ps_usecount != 0)
goto unlock;
- if (sc->ps_enabled &&
- !(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA |
- SC_OP_WAIT_FOR_TX_ACK)))
+ if (sc->ps_idle)
+ ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
+ else if (sc->ps_enabled &&
+ !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK)))
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
unlock:
@@ -316,7 +206,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
r = ath9k_hw_reset(ah, hchan, fastcc);
if (r) {
ath_print(common, ATH_DBG_FATAL,
- "Unable to reset channel (%u Mhz) "
+ "Unable to reset channel (%u MHz), "
"reset status %d\n",
channel->center_freq, r);
spin_unlock_bh(&sc->sc_resetlock);
@@ -349,7 +239,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
* When the task is complete, it reschedules itself depending on the
* appropriate interval that was calculated.
*/
-static void ath_ani_calibrate(unsigned long data)
+void ath_ani_calibrate(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
struct ath_hw *ah = sc->sc_ah;
@@ -504,7 +394,7 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
ath_tx_node_cleanup(sc, an);
}
-static void ath9k_tasklet(unsigned long data)
+void ath9k_tasklet(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
struct ath_hw *ah = sc->sc_ah;
@@ -536,7 +426,7 @@ static void ath9k_tasklet(unsigned long data)
*/
ath_print(common, ATH_DBG_PS,
"TSFOOR - Sync with next Beacon\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_BEACON | SC_OP_BEACON_SYNC;
+ sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
}
if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
@@ -637,7 +527,7 @@ irqreturn_t ath_isr(int irq, void *dev)
* receive frames */
ath9k_setpower(sc, ATH9K_PM_AWAKE);
ath9k_hw_setrxabort(sc->sc_ah, 0);
- sc->sc_flags |= SC_OP_WAIT_FOR_BEACON;
+ sc->ps_flags |= PS_WAIT_FOR_BEACON;
}
chip_reset:
@@ -924,44 +814,6 @@ static void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf
}
}
-static void setup_ht_cap(struct ath_softc *sc,
- struct ieee80211_sta_ht_cap *ht_info)
-{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- u8 tx_streams, rx_streams;
-
- ht_info->ht_supported = true;
- ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
- IEEE80211_HT_CAP_SM_PS |
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_DSSSCCK40;
-
- ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
-
- /* set up supported mcs set */
- memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
- tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
- 1 : 2;
- rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
- 1 : 2;
-
- if (tx_streams != rx_streams) {
- ath_print(common, ATH_DBG_CONFIG,
- "TX streams %d, RX streams: %d\n",
- tx_streams, rx_streams);
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
- ht_info->mcs.tx_params |= ((tx_streams - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
- }
-
- ht_info->mcs.rx_mask[0] = 0xff;
- if (rx_streams >= 2)
- ht_info->mcs.rx_mask[1] = 0xff;
-
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
-}
-
static void ath9k_bss_assoc_info(struct ath_softc *sc,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf)
@@ -983,7 +835,7 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
* on the receipt of the first Beacon frame (i.e.,
* after time sync with the AP).
*/
- sc->sc_flags |= SC_OP_BEACON_SYNC;
+ sc->ps_flags |= PS_BEACON_SYNC;
/* Configure the beacon */
ath_beacon_config(sc, vif);
@@ -1000,174 +852,6 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
}
}
-/********************************/
-/* LED functions */
-/********************************/
-
-static void ath_led_blink_work(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc,
- ath_led_blink_work.work);
-
- if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
- return;
-
- if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
- (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
- else
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
- (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
-
- ieee80211_queue_delayed_work(sc->hw,
- &sc->ath_led_blink_work,
- (sc->sc_flags & SC_OP_LED_ON) ?
- msecs_to_jiffies(sc->led_off_duration) :
- msecs_to_jiffies(sc->led_on_duration));
-
- sc->led_on_duration = sc->led_on_cnt ?
- max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
- ATH_LED_ON_DURATION_IDLE;
- sc->led_off_duration = sc->led_off_cnt ?
- max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
- ATH_LED_OFF_DURATION_IDLE;
- sc->led_on_cnt = sc->led_off_cnt = 0;
- if (sc->sc_flags & SC_OP_LED_ON)
- sc->sc_flags &= ~SC_OP_LED_ON;
- else
- sc->sc_flags |= SC_OP_LED_ON;
-}
-
-static void ath_led_brightness(struct led_classdev *led_cdev,
- enum led_brightness brightness)
-{
- struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
- struct ath_softc *sc = led->sc;
-
- switch (brightness) {
- case LED_OFF:
- if (led->led_type == ATH_LED_ASSOC ||
- led->led_type == ATH_LED_RADIO) {
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
- (led->led_type == ATH_LED_RADIO));
- sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
- if (led->led_type == ATH_LED_RADIO)
- sc->sc_flags &= ~SC_OP_LED_ON;
- } else {
- sc->led_off_cnt++;
- }
- break;
- case LED_FULL:
- if (led->led_type == ATH_LED_ASSOC) {
- sc->sc_flags |= SC_OP_LED_ASSOCIATED;
- ieee80211_queue_delayed_work(sc->hw,
- &sc->ath_led_blink_work, 0);
- } else if (led->led_type == ATH_LED_RADIO) {
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
- sc->sc_flags |= SC_OP_LED_ON;
- } else {
- sc->led_on_cnt++;
- }
- break;
- default:
- break;
- }
-}
-
-static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
- char *trigger)
-{
- int ret;
-
- led->sc = sc;
- led->led_cdev.name = led->name;
- led->led_cdev.default_trigger = trigger;
- led->led_cdev.brightness_set = ath_led_brightness;
-
- ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
- if (ret)
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
- "Failed to register led:%s", led->name);
- else
- led->registered = 1;
- return ret;
-}
-
-static void ath_unregister_led(struct ath_led *led)
-{
- if (led->registered) {
- led_classdev_unregister(&led->led_cdev);
- led->registered = 0;
- }
-}
-
-static void ath_deinit_leds(struct ath_softc *sc)
-{
- ath_unregister_led(&sc->assoc_led);
- sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
- ath_unregister_led(&sc->tx_led);
- ath_unregister_led(&sc->rx_led);
- ath_unregister_led(&sc->radio_led);
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
-}
-
-static void ath_init_leds(struct ath_softc *sc)
-{
- char *trigger;
- int ret;
-
- if (AR_SREV_9287(sc->sc_ah))
- sc->sc_ah->led_pin = ATH_LED_PIN_9287;
- else
- sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
-
- /* Configure gpio 1 for output */
- ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- /* LED off, active low */
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
-
- INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
-
- trigger = ieee80211_get_radio_led_name(sc->hw);
- snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
- "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->radio_led, trigger);
- sc->radio_led.led_type = ATH_LED_RADIO;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_assoc_led_name(sc->hw);
- snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
- "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->assoc_led, trigger);
- sc->assoc_led.led_type = ATH_LED_ASSOC;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_tx_led_name(sc->hw);
- snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
- "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->tx_led, trigger);
- sc->tx_led.led_type = ATH_LED_TX;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_rx_led_name(sc->hw);
- snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
- "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->rx_led, trigger);
- sc->rx_led.led_type = ATH_LED_RX;
- if (ret)
- goto fail;
-
- return;
-
-fail:
- cancel_delayed_work_sync(&sc->ath_led_blink_work);
- ath_deinit_leds(sc);
-}
-
void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
{
struct ath_hw *ah = sc->sc_ah;
@@ -1185,7 +869,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
r = ath9k_hw_reset(ah, ah->curchan, false);
if (r) {
ath_print(common, ATH_DBG_FATAL,
- "Unable to reset channel %u (%uMhz) ",
+ "Unable to reset channel (%u MHz), "
"reset status %d\n",
channel->center_freq, r);
}
@@ -1240,7 +924,7 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
r = ath9k_hw_reset(ah, ah->curchan, false);
if (r) {
ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
- "Unable to reset channel %u (%uMhz) "
+ "Unable to reset channel (%u MHz), "
"reset status %d\n",
channel->center_freq, r);
}
@@ -1252,710 +936,6 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
}
-/*******************/
-/* Rfkill */
-/*******************/
-
-static bool ath_is_rfkill_set(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
- ah->rfkill_polarity;
-}
-
-static void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
-{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- bool blocked = !!ath_is_rfkill_set(sc);
-
- wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
-}
-
-static void ath_start_rfkill_poll(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
- wiphy_rfkill_start_polling(sc->hw->wiphy);
-}
-
-static void ath9k_uninit_hw(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- BUG_ON(!ah);
-
- ath9k_exit_debug(ah);
- ath9k_hw_detach(ah);
- sc->sc_ah = NULL;
-}
-
-static void ath_clean_core(struct ath_softc *sc)
-{
- struct ieee80211_hw *hw = sc->hw;
- struct ath_hw *ah = sc->sc_ah;
- int i = 0;
-
- ath9k_ps_wakeup(sc);
-
- dev_dbg(sc->dev, "Detach ATH hw\n");
-
- ath_deinit_leds(sc);
- wiphy_rfkill_stop_polling(sc->hw->wiphy);
-
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (aphy == NULL)
- continue;
- sc->sec_wiphy[i] = NULL;
- ieee80211_unregister_hw(aphy->hw);
- ieee80211_free_hw(aphy->hw);
- }
- ieee80211_unregister_hw(hw);
- ath_rx_cleanup(sc);
- ath_tx_cleanup(sc);
-
- tasklet_kill(&sc->intr_tq);
- tasklet_kill(&sc->bcon_tasklet);
-
- if (!(sc->sc_flags & SC_OP_INVALID))
- ath9k_setpower(sc, ATH9K_PM_AWAKE);
-
- /* cleanup tx queues */
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
- if (ATH_TXQ_SETUP(sc, i))
- ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-
- if ((sc->btcoex.no_stomp_timer) &&
- ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
- ath_gen_timer_free(ah, sc->btcoex.no_stomp_timer);
-}
-
-void ath_detach(struct ath_softc *sc)
-{
- ath_clean_core(sc);
- ath9k_uninit_hw(sc);
-}
-
-void ath_cleanup(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
-
- ath_clean_core(sc);
- free_irq(sc->irq, sc);
- ath_bus_cleanup(common);
- kfree(sc->sec_wiphy);
- ieee80211_free_hw(sc->hw);
-
- ath9k_uninit_hw(sc);
-}
-
-static int ath9k_reg_notifier(struct wiphy *wiphy,
- struct regulatory_request *request)
-{
- struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
-
- return ath_reg_notifier_apply(wiphy, request, reg);
-}
-
-/*
- * Detects if there is any priority bt traffic
- */
-static void ath_detect_bt_priority(struct ath_softc *sc)
-{
- struct ath_btcoex *btcoex = &sc->btcoex;
- struct ath_hw *ah = sc->sc_ah;
-
- if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
- btcoex->bt_priority_cnt++;
-
- if (time_after(jiffies, btcoex->bt_priority_time +
- msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
- if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
- "BT priority traffic detected");
- sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
- } else {
- sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
- }
-
- btcoex->bt_priority_cnt = 0;
- btcoex->bt_priority_time = jiffies;
- }
-}
-
-/*
- * Configures appropriate weight based on stomp type.
- */
-static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
- enum ath_stomp_type stomp_type)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- switch (stomp_type) {
- case ATH_BTCOEX_STOMP_ALL:
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_ALL_WLAN_WGHT);
- break;
- case ATH_BTCOEX_STOMP_LOW:
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_LOW_WLAN_WGHT);
- break;
- case ATH_BTCOEX_STOMP_NONE:
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_NONE_WLAN_WGHT);
- break;
- default:
- ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Invalid Stomptype\n");
- break;
- }
-
- ath9k_hw_btcoex_enable(ah);
-}
-
-static void ath9k_gen_timer_start(struct ath_hw *ah,
- struct ath_gen_timer *timer,
- u32 timer_next,
- u32 timer_period)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
-
- ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
-
- if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
- ath9k_hw_set_interrupts(ah, 0);
- sc->imask |= ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah, sc->imask);
- }
-}
-
-static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
-
- ath9k_hw_gen_timer_stop(ah, timer);
-
- /* if no timer is enabled, turn off interrupt mask */
- if (timer_table->timer_mask.val == 0) {
- ath9k_hw_set_interrupts(ah, 0);
- sc->imask &= ~ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah, sc->imask);
- }
-}
-
-/*
- * This is the master bt coex timer which runs every 45 ms;
- * bt traffic is given priority during 55% of this period,
- * while wlan gets the remaining 45%.
- */
-static void ath_btcoex_period_timer(unsigned long data)
-{
- struct ath_softc *sc = (struct ath_softc *) data;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_btcoex *btcoex = &sc->btcoex;
-
- ath_detect_bt_priority(sc);
-
- spin_lock_bh(&btcoex->btcoex_lock);
-
- ath9k_btcoex_bt_stomp(sc, btcoex->bt_stomp_type);
-
- spin_unlock_bh(&btcoex->btcoex_lock);
-
- if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
- if (btcoex->hw_timer_enabled)
- ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
-
- ath9k_gen_timer_start(ah,
- btcoex->no_stomp_timer,
- (ath9k_hw_gettsf32(ah) +
- btcoex->btcoex_no_stomp),
- btcoex->btcoex_no_stomp * 10);
- btcoex->hw_timer_enabled = true;
- }
-
- mod_timer(&btcoex->period_timer, jiffies +
- msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
-}
-
-/*
- * Generic tsf based hw timer which configures weight
- * registers to time slice between wlan and bt traffic
- */
-static void ath_btcoex_no_stomp_timer(void *arg)
-{
- struct ath_softc *sc = (struct ath_softc *)arg;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_btcoex *btcoex = &sc->btcoex;
-
- ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "no stomp timer running \n");
-
- spin_lock_bh(&btcoex->btcoex_lock);
-
- if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW)
- ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
- else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
- ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);
-
- spin_unlock_bh(&btcoex->btcoex_lock);
-}
-
-static int ath_init_btcoex_timer(struct ath_softc *sc)
-{
- struct ath_btcoex *btcoex = &sc->btcoex;
-
- btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
- btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
- btcoex->btcoex_period / 100;
-
- setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
- (unsigned long) sc);
-
- spin_lock_init(&btcoex->btcoex_lock);
-
- btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
- ath_btcoex_no_stomp_timer,
- ath_btcoex_no_stomp_timer,
- (void *) sc, AR_FIRST_NDP_TIMER);
-
- if (!btcoex->no_stomp_timer)
- return -ENOMEM;
-
- return 0;
-}
-
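For reference, the duty-cycle arithmetic in ath_init_btcoex_timer() above works out roughly as follows, assuming ATH_BTCOEX_DEF_BT_PERIOD is 45 (ms) and ATH_BTCOEX_DEF_DUTY_CYCLE is 55 (%), as the 55%/45% split in the period-timer comment suggests (illustrative values only, not part of the patch):

	btcoex_period   = 45 * 1000                 = 45000 us
	btcoex_no_stomp = (100 - 55) * 45000 / 100  = 20250 us

i.e. the "no stomp" generic timer fires 20.25 ms into each 45 ms period, which is the point where the weights are relaxed for the wlan share of the slice.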
-/*
- * Read and write share the same lock. We do this to serialize
- * reads and writes on Atheros 802.11n PCI devices only. This is required
- * as the FIFO on these devices can only sanely accept two requests; after
- * that the device goes bananas. Serializing the reads/writes prevents this
- * from happening.
- */
-
-static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
-{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
-
- if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
- unsigned long flags;
- spin_lock_irqsave(&sc->sc_serial_rw, flags);
- iowrite32(val, sc->mem + reg_offset);
- spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
- } else
- iowrite32(val, sc->mem + reg_offset);
-}
-
-static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
-{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- u32 val;
-
- if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
- unsigned long flags;
- spin_lock_irqsave(&sc->sc_serial_rw, flags);
- val = ioread32(sc->mem + reg_offset);
- spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
- } else
- val = ioread32(sc->mem + reg_offset);
- return val;
-}
-
-static const struct ath_ops ath9k_common_ops = {
- .read = ath9k_ioread32,
- .write = ath9k_iowrite32,
-};
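These hooks are the low-level register accessors that the shared ath layer calls through ath_common->ops. A minimal sketch of the wrapper side, assuming REG_READ/REG_WRITE-style macros that dispatch through these function pointers (the macro names and exact shape are an assumption for illustration, not taken from this diff):

	/* Sketch only: route register accesses through the ath_common ops. */
	#define REG_WRITE(_ah, _reg, _val) \
		(ath9k_hw_common(_ah)->ops->write((_ah), (_val), (_reg)))
	#define REG_READ(_ah, _reg) \
		(ath9k_hw_common(_ah)->ops->read((_ah), (_reg)))

With serialize_regmode set to SER_REG_MODE_ON, every such access takes sc->sc_serial_rw, which is what the serialization comment above is about.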
-
-/*
- * Initialize and fill ath_softc. ath_softc is the
- * "Software Carrier" struct. Historically it has existed
- * to allow the separation between hardware specific
- * variables (now in ath_hw) and driver specific variables.
- */
-static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
- const struct ath_bus_ops *bus_ops)
-{
- struct ath_hw *ah = NULL;
- struct ath_common *common;
- int r = 0, i;
- int csz = 0;
- int qnum;
-
- /* XXX: hardware will not be ready until ath_open() is called */
- sc->sc_flags |= SC_OP_INVALID;
-
- spin_lock_init(&sc->wiphy_lock);
- spin_lock_init(&sc->sc_resetlock);
- spin_lock_init(&sc->sc_serial_rw);
- spin_lock_init(&sc->sc_pm_lock);
- mutex_init(&sc->mutex);
- tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
- tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
- (unsigned long)sc);
-
- ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
- if (!ah)
- return -ENOMEM;
-
- ah->hw_version.devid = devid;
- ah->hw_version.subsysid = subsysid;
- sc->sc_ah = ah;
-
- common = ath9k_hw_common(ah);
- common->ops = &ath9k_common_ops;
- common->bus_ops = bus_ops;
- common->ah = ah;
- common->hw = sc->hw;
- common->priv = sc;
- common->debug_mask = ath9k_debug;
-
- /*
- * Cache line size is used to size and align various
- * structures used to communicate with the hardware.
- */
- ath_read_cachesize(common, &csz);
- /* XXX assert csz is non-zero */
- common->cachelsz = csz << 2; /* convert to bytes */
-
- r = ath9k_hw_init(ah);
- if (r) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to initialize hardware; "
- "initialization status: %d\n", r);
- goto bad_free_hw;
- }
-
- if (ath9k_init_debug(ah) < 0) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to create debugfs files\n");
- goto bad_free_hw;
- }
-
- /* Get the hardware key cache size. */
- common->keymax = ah->caps.keycache_size;
- if (common->keymax > ATH_KEYMAX) {
- ath_print(common, ATH_DBG_ANY,
- "Warning, using only %u entries in %u key cache\n",
- ATH_KEYMAX, common->keymax);
- common->keymax = ATH_KEYMAX;
- }
-
- /*
- * Reset the key cache since some parts do not
- * reset the contents on initial power up.
- */
- for (i = 0; i < common->keymax; i++)
- ath9k_hw_keyreset(ah, (u16) i);
-
- /* default to MONITOR mode */
- sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
-
- /*
- * Allocate hardware transmit queues: one queue for
- * beacon frames and one data queue for each QoS
- * priority. Note that the hal handles resetting
- * these queues at the needed time.
- */
- sc->beacon.beaconq = ath9k_hw_beaconq_setup(ah);
- if (sc->beacon.beaconq == -1) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup a beacon xmit queue\n");
- r = -EIO;
- goto bad2;
- }
- sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
- if (sc->beacon.cabq == NULL) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup CAB xmit queue\n");
- r = -EIO;
- goto bad2;
- }
-
- sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
- ath_cabq_update(sc);
-
- for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
- sc->tx.hwq_map[i] = -1;
-
- /* Setup data queues */
- /* NB: ensure BK queue is the lowest priority h/w queue */
- if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for BK traffic\n");
- r = -EIO;
- goto bad2;
- }
-
- if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for BE traffic\n");
- r = -EIO;
- goto bad2;
- }
- if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for VI traffic\n");
- r = -EIO;
- goto bad2;
- }
- if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for VO traffic\n");
- r = -EIO;
- goto bad2;
- }
-
- /* Initializes the noise floor to a reasonable default value.
- * Later on this will be updated during ANI processing. */
-
- common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
- setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
-
- if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_TKIP, NULL)) {
- /*
- * Whether we should enable h/w TKIP MIC.
- * XXX: if we don't support WME TKIP MIC, then we wouldn't
- * report WMM capable, so it's always safe to turn on
- * TKIP MIC in this case.
- */
- ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
- 0, 1, NULL);
- }
-
- /*
- * Check whether the separate key cache entries
- * are required to handle both tx+rx MIC keys.
- * With split mic keys the number of stations is limited
- * to 27 otherwise 59.
- */
- if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_TKIP, NULL)
- && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_MIC, NULL)
- && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
- 0, NULL))
- common->splitmic = 1;
-
- /* turn on mcast key search if possible */
- if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
- (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
- 1, NULL);
-
- sc->config.txpowlimit = ATH_TXPOWER_MAX;
-
- /* 11n Capabilities */
- if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
- sc->sc_flags |= SC_OP_TXAGGR;
- sc->sc_flags |= SC_OP_RXAGGR;
- }
-
- common->tx_chainmask = ah->caps.tx_chainmask;
- common->rx_chainmask = ah->caps.rx_chainmask;
-
- ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
- sc->rx.defant = ath9k_hw_getdefantenna(ah);
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
- memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
-
- sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
-
- /* initialize beacon slots */
- for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
- sc->beacon.bslot[i] = NULL;
- sc->beacon.bslot_aphy[i] = NULL;
- }
-
- /* setup channels and rates */
-
- if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
- sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
- sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
- sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
- ARRAY_SIZE(ath9k_2ghz_chantable);
- sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
- sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
- ARRAY_SIZE(ath9k_legacy_rates);
- }
-
- if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
- sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
- sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
- sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
- ARRAY_SIZE(ath9k_5ghz_chantable);
- sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
- ath9k_legacy_rates + 4;
- sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
- ARRAY_SIZE(ath9k_legacy_rates) - 4;
- }
-
- switch (ah->btcoex_hw.scheme) {
- case ATH_BTCOEX_CFG_NONE:
- break;
- case ATH_BTCOEX_CFG_2WIRE:
- ath9k_hw_btcoex_init_2wire(ah);
- break;
- case ATH_BTCOEX_CFG_3WIRE:
- ath9k_hw_btcoex_init_3wire(ah);
- r = ath_init_btcoex_timer(sc);
- if (r)
- goto bad2;
- qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
- ath9k_hw_init_btcoex_hw(ah, qnum);
- sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- return 0;
-bad2:
- /* cleanup tx queues */
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
- if (ATH_TXQ_SETUP(sc, i))
- ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-
-bad_free_hw:
- ath9k_uninit_hw(sc);
- return r;
-}
-
-void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
-{
- hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_SPECTRUM_MGMT;
-
- if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
- hw->flags |= IEEE80211_HW_MFP_CAPABLE;
-
- hw->wiphy->interface_modes =
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT);
-
- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
- hw->queues = 4;
- hw->max_rates = 4;
- hw->channel_change_time = 5000;
- hw->max_listen_interval = 10;
- /* Hardware supports 10 but we use 4 */
- hw->max_rate_tries = 4;
- hw->sta_data_size = sizeof(struct ath_node);
- hw->vif_data_size = sizeof(struct ath_vif);
-
- hw->rate_control_algorithm = "ath9k_rate_control";
-
- if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &sc->sbands[IEEE80211_BAND_2GHZ];
- if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &sc->sbands[IEEE80211_BAND_5GHZ];
-}
-
-/* Device driver core initialization */
-int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
- const struct ath_bus_ops *bus_ops)
-{
- struct ieee80211_hw *hw = sc->hw;
- struct ath_common *common;
- struct ath_hw *ah;
- int error = 0, i;
- struct ath_regulatory *reg;
-
- dev_dbg(sc->dev, "Attach ATH hw\n");
-
- error = ath_init_softc(devid, sc, subsysid, bus_ops);
- if (error != 0)
- return error;
-
- ah = sc->sc_ah;
- common = ath9k_hw_common(ah);
-
- /* get mac address from hardware and set in mac80211 */
-
- SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
-
- ath_set_hw_capab(sc, hw);
-
- error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
- ath9k_reg_notifier);
- if (error)
- return error;
-
- reg = &common->regulatory;
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
- if (test_bit(ATH9K_MODE_11G, ah->caps.wireless_modes))
- setup_ht_cap(sc,
- &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
- if (test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes))
- setup_ht_cap(sc,
- &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
- }
-
- /* initialize tx/rx engine */
- error = ath_tx_init(sc, ATH_TXBUF);
- if (error != 0)
- goto error_attach;
-
- error = ath_rx_init(sc, ATH_RXBUF);
- if (error != 0)
- goto error_attach;
-
- INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
- INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
- sc->wiphy_scheduler_int = msecs_to_jiffies(500);
-
- error = ieee80211_register_hw(hw);
-
- if (!ath_is_world_regd(reg)) {
- error = regulatory_hint(hw->wiphy, reg->alpha2);
- if (error)
- goto error_attach;
- }
-
- /* Initialize LED control */
- ath_init_leds(sc);
-
- ath_start_rfkill_poll(sc);
-
- return 0;
-
-error_attach:
- /* cleanup tx queues */
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
- if (ATH_TXQ_SETUP(sc, i))
- ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-
- ath9k_uninit_hw(sc);
-
- return error;
-}
-
int ath_reset(struct ath_softc *sc, bool retry_tx)
{
struct ath_hw *ah = sc->sc_ah;
@@ -1966,6 +946,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
/* Stop ANI */
del_timer_sync(&common->ani.timer);
+ ieee80211_stop_queues(hw);
+
ath9k_hw_set_interrupts(ah, 0);
ath_drain_all_txq(sc, retry_tx);
ath_stoprecv(sc);
@@ -2007,131 +989,14 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
}
}
+ ieee80211_wake_queues(hw);
+
/* Start ANI */
ath_start_ani(common);
return r;
}
-/*
- * This function allocates both the DMA descriptor structure and the
- * buffers it contains. These hold the descriptors used by the system.
- */
-int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
- struct list_head *head, const char *name,
- int nbuf, int ndesc)
-{
-#define DS2PHYS(_dd, _ds) \
- ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
-#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
-#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_desc *ds;
- struct ath_buf *bf;
- int i, bsize, error;
-
- ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
- name, nbuf, ndesc);
-
- INIT_LIST_HEAD(head);
- /* ath_desc must be a multiple of DWORDs */
- if ((sizeof(struct ath_desc) % 4) != 0) {
- ath_print(common, ATH_DBG_FATAL,
- "ath_desc not DWORD aligned\n");
- BUG_ON((sizeof(struct ath_desc) % 4) != 0);
- error = -ENOMEM;
- goto fail;
- }
-
- dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
-
- /*
- * Need additional DMA memory because we can't use
- * descriptors that cross the 4K page boundary. Assume
- * one skipped descriptor per 4K page.
- */
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
- u32 ndesc_skipped =
- ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
- u32 dma_len;
-
- while (ndesc_skipped) {
- dma_len = ndesc_skipped * sizeof(struct ath_desc);
- dd->dd_desc_len += dma_len;
-
- ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
- };
- }
-
- /* allocate descriptors */
- dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
- &dd->dd_desc_paddr, GFP_KERNEL);
- if (dd->dd_desc == NULL) {
- error = -ENOMEM;
- goto fail;
- }
- ds = dd->dd_desc;
- ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
- name, ds, (u32) dd->dd_desc_len,
- ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
-
- /* allocate buffers */
- bsize = sizeof(struct ath_buf) * nbuf;
- bf = kzalloc(bsize, GFP_KERNEL);
- if (bf == NULL) {
- error = -ENOMEM;
- goto fail2;
- }
- dd->dd_bufptr = bf;
-
- for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
- bf->bf_desc = ds;
- bf->bf_daddr = DS2PHYS(dd, ds);
-
- if (!(sc->sc_ah->caps.hw_caps &
- ATH9K_HW_CAP_4KB_SPLITTRANS)) {
- /*
- * Skip descriptor addresses which can cause 4KB
- * boundary crossing (addr + length) with a 32 dword
- * descriptor fetch.
- */
- while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
- BUG_ON((caddr_t) bf->bf_desc >=
- ((caddr_t) dd->dd_desc +
- dd->dd_desc_len));
-
- ds += ndesc;
- bf->bf_desc = ds;
- bf->bf_daddr = DS2PHYS(dd, ds);
- }
- }
- list_add_tail(&bf->list, head);
- }
- return 0;
-fail2:
- dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
- dd->dd_desc_paddr);
-fail:
- memset(dd, 0, sizeof(*dd));
- return error;
-#undef ATH_DESC_4KB_BOUND_CHECK
-#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
-#undef DS2PHYS
-}
-
-void ath_descdma_cleanup(struct ath_softc *sc,
- struct ath_descdma *dd,
- struct list_head *head)
-{
- dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
- dd->dd_desc_paddr);
-
- INIT_LIST_HEAD(head);
- kfree(dd->dd_bufptr);
- memset(dd, 0, sizeof(*dd));
-}
-
int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
{
int qnum;
@@ -2210,28 +1075,6 @@ void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
/* mac80211 callbacks */
/**********************/
-/*
- * (Re)start btcoex timers
- */
-static void ath9k_btcoex_timer_resume(struct ath_softc *sc)
-{
- struct ath_btcoex *btcoex = &sc->btcoex;
- struct ath_hw *ah = sc->sc_ah;
-
- ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Starting btcoex timers");
-
- /* make sure duty cycle timer is also stopped when resuming */
- if (btcoex->hw_timer_enabled)
- ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
-
- btcoex->bt_priority_cnt = 0;
- btcoex->bt_priority_time = jiffies;
- sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
-
- mod_timer(&btcoex->period_timer, jiffies);
-}
-
static int ath9k_start(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
@@ -2401,11 +1244,11 @@ static int ath9k_tx(struct ieee80211_hw *hw,
if (ieee80211_is_pspoll(hdr->frame_control)) {
ath_print(common, ATH_DBG_PS,
"Sending PS-Poll to pick a buffered frame\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_PSPOLL_DATA;
+ sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA;
} else {
ath_print(common, ATH_DBG_PS,
"Wake up to complete TX\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_TX_ACK;
+ sc->ps_flags |= PS_WAIT_FOR_TX_ACK;
}
/*
* The actual restore operation will happen only after
@@ -2458,22 +1301,6 @@ exit:
return 0;
}
-/*
- * Pause btcoex timer and bt duty cycle timer
- */
-static void ath9k_btcoex_timer_pause(struct ath_softc *sc)
-{
- struct ath_btcoex *btcoex = &sc->btcoex;
- struct ath_hw *ah = sc->sc_ah;
-
- del_timer_sync(&btcoex->period_timer);
-
- if (btcoex->hw_timer_enabled)
- ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
-
- btcoex->hw_timer_enabled = false;
-}
-
static void ath9k_stop(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
@@ -2645,10 +1472,10 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
(sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
ath9k_ps_wakeup(sc);
ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
- ath_beacon_return(sc, avp);
ath9k_ps_restore(sc);
}
+ ath_beacon_return(sc, avp);
sc->sc_flags &= ~SC_OP_BEACONS;
for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
@@ -2703,6 +1530,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
spin_unlock_bh(&sc->wiphy_lock);
if (enable_radio) {
+ sc->ps_idle = false;
ath_radio_enable(sc, hw);
ath_print(common, ATH_DBG_CONFIG,
"not-idle: enabling radio\n");
@@ -2717,7 +1545,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
*/
if (changed & IEEE80211_CONF_CHANGE_PS) {
if (conf->flags & IEEE80211_CONF_PS) {
- sc->sc_flags |= SC_OP_PS_ENABLED;
+ sc->ps_flags |= PS_ENABLED;
if (!(ah->caps.hw_caps &
ATH9K_HW_CAP_AUTOSLEEP)) {
if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
@@ -2730,23 +1558,23 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
* At this point we know hardware has received an ACK
* of a previously sent null data frame.
*/
- if ((sc->sc_flags & SC_OP_NULLFUNC_COMPLETED)) {
- sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
+ if ((sc->ps_flags & PS_NULLFUNC_COMPLETED)) {
+ sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
sc->ps_enabled = true;
ath9k_hw_setrxabort(sc->sc_ah, 1);
}
} else {
sc->ps_enabled = false;
- sc->sc_flags &= ~(SC_OP_PS_ENABLED |
- SC_OP_NULLFUNC_COMPLETED);
+ sc->ps_flags &= ~(PS_ENABLED |
+ PS_NULLFUNC_COMPLETED);
ath9k_setpower(sc, ATH9K_PM_AWAKE);
if (!(ah->caps.hw_caps &
ATH9K_HW_CAP_AUTOSLEEP)) {
ath9k_hw_setrxabort(sc->sc_ah, 0);
- sc->sc_flags &= ~(SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA |
- SC_OP_WAIT_FOR_TX_ACK);
+ sc->ps_flags &= ~(PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK);
if (sc->imask & ATH9K_INT_TIM_TIMER) {
sc->imask &= ~ATH9K_INT_TIM_TIMER;
ath9k_hw_set_interrupts(sc->sc_ah,
@@ -2756,6 +1584,14 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
}
}
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (conf->flags & IEEE80211_CONF_MONITOR) {
+ ath_print(common, ATH_DBG_CONFIG,
+ "HW opmode set to Monitor mode\n");
+ sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
+ }
+ }
+
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
struct ieee80211_channel *curchan = hw->conf.channel;
int pos = curchan->hw_value;
@@ -2791,8 +1627,10 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
}
skip_chan_change:
- if (changed & IEEE80211_CONF_CHANGE_POWER)
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
sc->config.txpowlimit = 2 * conf->power_level;
+ ath_update_txpow(sc);
+ }
spin_lock_bh(&sc->wiphy_lock);
disable_radio = ath9k_all_wiphys_idle(sc);
@@ -2800,6 +1638,7 @@ skip_chan_change:
if (disable_radio) {
ath_print(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
+ sc->ps_idle = true;
ath_radio_disable(sc, hw);
}
@@ -2956,6 +1795,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_vif *avp = (void *)vif->drv_priv;
+ int slottime;
int error;
mutex_lock(&sc->mutex);
@@ -2991,6 +1831,25 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
ath_beacon_config(sc, vif);
}
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ if (bss_conf->use_short_slot)
+ slottime = 9;
+ else
+ slottime = 20;
+ if (vif->type == NL80211_IFTYPE_AP) {
+ /*
+ * Defer update, so that connected stations can adjust
+ * their settings at the same time.
+ * See beacon.c for more details
+ */
+ sc->beacon.slottime = slottime;
+ sc->beacon.updateslot = UPDATE;
+ } else {
+ ah->slottime = slottime;
+ ath9k_hw_init_global_settings(ah);
+ }
+ }
+
/* Disable transmission of beacons */
if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon)
ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
@@ -3161,6 +2020,18 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
mutex_unlock(&sc->mutex);
}
+static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
+{
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+ struct ath_hw *ah = sc->sc_ah;
+
+ mutex_lock(&sc->mutex);
+ ah->coverage_class = coverage_class;
+ ath9k_hw_init_global_settings(ah);
+ mutex_unlock(&sc->mutex);
+}
+
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
.start = ath9k_start,
@@ -3180,64 +2051,5 @@ struct ieee80211_ops ath9k_ops = {
.sw_scan_start = ath9k_sw_scan_start,
.sw_scan_complete = ath9k_sw_scan_complete,
.rfkill_poll = ath9k_rfkill_poll_state,
+ .set_coverage_class = ath9k_set_coverage_class,
};
-
-static int __init ath9k_init(void)
-{
- int error;
-
- /* Register rate control algorithm */
- error = ath_rate_control_register();
- if (error != 0) {
- printk(KERN_ERR
- "ath9k: Unable to register rate control "
- "algorithm: %d\n",
- error);
- goto err_out;
- }
-
- error = ath9k_debug_create_root();
- if (error) {
- printk(KERN_ERR
- "ath9k: Unable to create debugfs root: %d\n",
- error);
- goto err_rate_unregister;
- }
-
- error = ath_pci_init();
- if (error < 0) {
- printk(KERN_ERR
- "ath9k: No PCI devices found, driver not installed.\n");
- error = -ENODEV;
- goto err_remove_root;
- }
-
- error = ath_ahb_init();
- if (error < 0) {
- error = -ENODEV;
- goto err_pci_exit;
- }
-
- return 0;
-
- err_pci_exit:
- ath_pci_exit();
-
- err_remove_root:
- ath9k_debug_remove_root();
- err_rate_unregister:
- ath_rate_control_unregister();
- err_out:
- return error;
-}
-module_init(ath9k_init);
-
-static void __exit ath9k_exit(void)
-{
- ath_ahb_exit();
- ath_pci_exit();
- ath9k_debug_remove_root();
- ath_rate_control_unregister();
- printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
-}
-module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index f7af5ea54753..9441c6718a30 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -18,13 +18,14 @@
#include <linux/pci.h>
#include "ath9k.h"
-static struct pci_device_id ath_pci_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
{ PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
{ PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */
+ { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
{ PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
{ 0 }
@@ -49,16 +50,6 @@ static void ath_pci_read_cachesize(struct ath_common *common, int *csz)
*csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
}
-static void ath_pci_cleanup(struct ath_common *common)
-{
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- struct pci_dev *pdev = to_pci_dev(sc->dev);
-
- pci_iounmap(pdev, sc->mem);
- pci_disable_device(pdev);
- pci_release_region(pdev, 0);
-}
-
static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
struct ath_hw *ah = (struct ath_hw *) common->ah;
@@ -98,7 +89,6 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
static const struct ath_bus_ops ath_pci_bus_ops = {
.read_cachesize = ath_pci_read_cachesize,
- .cleanup = ath_pci_cleanup,
.eeprom_read = ath_pci_eeprom_read,
.bt_coex_prep = ath_pci_bt_coex_prep,
};
@@ -113,25 +103,22 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
u16 subsysid;
u32 val;
int ret = 0;
- struct ath_hw *ah;
char hw_name[64];
if (pci_enable_device(pdev))
return -EIO;
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-
if (ret) {
printk(KERN_ERR "ath9k: 32-bit DMA not available\n");
- goto bad;
+ goto err_dma;
}
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-
if (ret) {
printk(KERN_ERR "ath9k: 32-bit DMA consistent "
"DMA enable failed\n");
- goto bad;
+ goto err_dma;
}
/*
@@ -171,22 +158,22 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret) {
dev_err(&pdev->dev, "PCI memory region reserve error\n");
ret = -ENODEV;
- goto bad;
+ goto err_region;
}
mem = pci_iomap(pdev, 0, 0);
if (!mem) {
printk(KERN_ERR "PCI memory map error\n") ;
ret = -EIO;
- goto bad1;
+ goto err_iomap;
}
hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) +
sizeof(struct ath_softc), &ath9k_ops);
if (!hw) {
- dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
+ dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
ret = -ENOMEM;
- goto bad2;
+ goto err_alloc_hw;
}
SET_IEEE80211_DEV(hw, &pdev->dev);
@@ -201,25 +188,25 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sc->dev = &pdev->dev;
sc->mem = mem;
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid);
- ret = ath_init_device(id->device, sc, subsysid, &ath_pci_bus_ops);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize device\n");
- goto bad3;
- }
-
- /* setup interrupt service routine */
+ /* Will be cleared in ath9k_start() */
+ sc->sc_flags |= SC_OP_INVALID;
ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
- goto bad4;
+ goto err_irq;
}
sc->irq = pdev->irq;
- ah = sc->sc_ah;
- ath9k_hw_name(ah, hw_name, sizeof(hw_name));
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid);
+ ret = ath9k_init_device(id->device, sc, subsysid, &ath_pci_bus_ops);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize device\n");
+ goto err_init;
+ }
+
+ ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
printk(KERN_INFO
"%s: %s mem=0x%lx, irq=%d\n",
wiphy_name(hw->wiphy),
@@ -227,15 +214,18 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
(unsigned long)mem, pdev->irq);
return 0;
-bad4:
- ath_detach(sc);
-bad3:
+
+err_init:
+ free_irq(sc->irq, sc);
+err_irq:
ieee80211_free_hw(hw);
-bad2:
+err_alloc_hw:
pci_iounmap(pdev, mem);
-bad1:
+err_iomap:
pci_release_region(pdev, 0);
-bad:
+err_region:
+ /* Nothing */
+err_dma:
pci_disable_device(pdev);
return ret;
}
@@ -245,8 +235,15 @@ static void ath_pci_remove(struct pci_dev *pdev)
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ void __iomem *mem = sc->mem;
+
+ ath9k_deinit_device(sc);
+ free_irq(sc->irq, sc);
+ ieee80211_free_hw(sc->hw);
- ath_cleanup(sc);
+ pci_iounmap(pdev, mem);
+ pci_disable_device(pdev);
+ pci_release_region(pdev, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 70fdb9d8db82..11968843c773 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -678,13 +678,13 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
* For Multi Rate Retry we use a different number of
* retry attempt counts. This ends up looking like this:
*
- * MRR[0] = 2
- * MRR[1] = 2
- * MRR[2] = 2
- * MRR[3] = 4
+ * MRR[0] = 4
+ * MRR[1] = 4
+ * MRR[2] = 4
+ * MRR[3] = 8
*
*/
- try_per_rate = sc->hw->max_rate_tries;
+ try_per_rate = 4;
rate_table = sc->cur_rate_table;
rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe);
@@ -714,7 +714,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
for ( ; i < 4; i++) {
/* Use twice the number of tries for the last MRR segment. */
if (i + 1 == 4)
- try_per_rate = 4;
+ try_per_rate = 8;
ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &nrix);
/* All other rates in the series have RTS enabled */
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 477365e5ae69..1ca42e5148c8 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -364,10 +364,10 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
return; /* not from our current AP */
- sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;
+ sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
- if (sc->sc_flags & SC_OP_BEACON_SYNC) {
- sc->sc_flags &= ~SC_OP_BEACON_SYNC;
+ if (sc->ps_flags & PS_BEACON_SYNC) {
+ sc->ps_flags &= ~PS_BEACON_SYNC;
ath_print(common, ATH_DBG_PS,
"Reconfigure Beacon timers based on "
"timestamp from the AP\n");
@@ -384,17 +384,17 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
*/
ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
"buffered broadcast/multicast frame(s)\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON;
+ sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
return;
}
- if (sc->sc_flags & SC_OP_WAIT_FOR_CAB) {
+ if (sc->ps_flags & PS_WAIT_FOR_CAB) {
/*
* This can happen if a broadcast frame is dropped or the AP
* fails to send a frame indicating that all CAB frames have
* been delivered.
*/
- sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
+ sc->ps_flags &= ~PS_WAIT_FOR_CAB;
ath_print(common, ATH_DBG_PS,
"PS wait for CAB frames timed out\n");
}
@@ -408,10 +408,10 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
hdr = (struct ieee80211_hdr *)skb->data;
/* Process Beacon and CAB receive in PS state */
- if ((sc->sc_flags & SC_OP_WAIT_FOR_BEACON) &&
+ if ((sc->ps_flags & PS_WAIT_FOR_BEACON) &&
ieee80211_is_beacon(hdr->frame_control))
ath_rx_ps_beacon(sc, skb);
- else if ((sc->sc_flags & SC_OP_WAIT_FOR_CAB) &&
+ else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
(ieee80211_is_data(hdr->frame_control) ||
ieee80211_is_action(hdr->frame_control)) &&
is_multicast_ether_addr(hdr->addr1) &&
@@ -420,20 +420,20 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
* No more broadcast/multicast frames to be received at this
* point.
*/
- sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
+ sc->ps_flags &= ~PS_WAIT_FOR_CAB;
ath_print(common, ATH_DBG_PS,
"All PS CAB frames received, back to sleep\n");
- } else if ((sc->sc_flags & SC_OP_WAIT_FOR_PSPOLL_DATA) &&
+ } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
!is_multicast_ether_addr(hdr->addr1) &&
!ieee80211_has_morefrags(hdr->frame_control)) {
- sc->sc_flags &= ~SC_OP_WAIT_FOR_PSPOLL_DATA;
+ sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
ath_print(common, ATH_DBG_PS,
"Going back to sleep after having received "
- "PS-Poll data (0x%x)\n",
- sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA |
- SC_OP_WAIT_FOR_TX_ACK));
+ "PS-Poll data (0x%lx)\n",
+ sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK));
}
}
@@ -571,6 +571,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
hw = ath_get_virt_hw(sc, hdr);
rx_stats = &ds->ds_rxstat;
+ ath_debug_stat_rx(sc, bf);
+
/*
* If we're asked to flush receive queue, directly
* chain it back at the queue without processing it.
@@ -631,9 +633,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
sc->rx.rxotherant = 0;
}
- if (unlikely(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA)))
+ if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA)))
ath_rx_ps(sc, skb);
ath_rx_send_to_mac80211(hw, sc, skb, rxs);
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 8e653fb937a1..72cfa8ebd9ae 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -1547,9 +1547,9 @@ enum {
#define AR_BT_COEX_WEIGHT 0x8174
#define AR_BT_COEX_WGHT 0xff55
-#define AR_STOMP_ALL_WLAN_WGHT 0xffcc
-#define AR_STOMP_LOW_WLAN_WGHT 0xaaa8
-#define AR_STOMP_NONE_WLAN_WGHT 0xaa00
+#define AR_STOMP_ALL_WLAN_WGHT 0xfcfc
+#define AR_STOMP_LOW_WLAN_WGHT 0xa8a8
+#define AR_STOMP_NONE_WLAN_WGHT 0x0000
#define AR_BTCOEX_BT_WGHT 0x0000ffff
#define AR_BTCOEX_BT_WGHT_S 0
#define AR_BTCOEX_WL_WGHT 0xffff0000
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index cd26caaf44e7..a43fbf84dab9 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -152,7 +152,7 @@ int ath9k_wiphy_add(struct ath_softc *sc)
SET_IEEE80211_PERM_ADDR(hw, addr);
- ath_set_hw_capab(sc, hw);
+ ath9k_set_hw_capab(sc, hw);
error = ieee80211_register_hw(hw);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index fa12b9060b0b..3c790a4f38f7 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1498,26 +1498,6 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
ctsrate |= rate->hw_value_short;
- /*
- * ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive.
- * Check the first rate in the series to decide whether RTS/CTS
- * or CTS-to-self has to be used.
- */
- if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
- flags = ATH9K_TXDESC_CTSENA;
- else if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
- flags = ATH9K_TXDESC_RTSENA;
-
- /* FIXME: Handle aggregation protection */
- if (sc->config.ath_aggr_prot &&
- (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
- flags = ATH9K_TXDESC_RTSENA;
- }
-
- /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
- if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
- flags &= ~(ATH9K_TXDESC_RTSENA);
-
for (i = 0; i < 4; i++) {
bool is_40, is_sgi, is_sp;
int phy;
@@ -1529,8 +1509,15 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
series[i].Tries = rates[i].count;
series[i].ChSel = common->tx_chainmask;
- if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
+ (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
+ flags |= ATH9K_TXDESC_RTSENA;
+ } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
+ flags |= ATH9K_TXDESC_CTSENA;
+ }
+
if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
series[i].RateFlags |= ATH9K_RATESERIES_2040;
if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
@@ -1568,6 +1555,14 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
}
+ /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
+ if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
+ flags &= ~ATH9K_TXDESC_RTSENA;
+
+ /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
+ if (flags & ATH9K_TXDESC_RTSENA)
+ flags &= ~ATH9K_TXDESC_CTSENA;
+
/* set dur_update_en for l-sig computation except for PS-Poll frames */
ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
bf->bf_lastbf->bf_desc,
@@ -1648,7 +1643,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
/* tag if this is a nullfunc frame to enable PS when AP acks it */
if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
bf->bf_isnullfunc = true;
- sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
+ sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
} else
bf->bf_isnullfunc = false;
@@ -1858,15 +1853,15 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
skb_pull(skb, padsize);
}
- if (sc->sc_flags & SC_OP_WAIT_FOR_TX_ACK) {
- sc->sc_flags &= ~SC_OP_WAIT_FOR_TX_ACK;
+ if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
+ sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
ath_print(common, ATH_DBG_PS,
"Going back to sleep after having "
- "received TX status (0x%x)\n",
- sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA |
- SC_OP_WAIT_FOR_TX_ACK));
+ "received TX status (0x%lx)\n",
+ sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK));
}
if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
@@ -2053,11 +2048,11 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
*/
if (bf->bf_isnullfunc &&
(ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
- if ((sc->sc_flags & SC_OP_PS_ENABLED)) {
+ if ((sc->ps_flags & PS_ENABLED)) {
sc->ps_enabled = true;
ath9k_hw_setrxabort(sc->sc_ah, 1);
} else
- sc->sc_flags |= SC_OP_NULLFUNC_COMPLETED;
+ sc->ps_flags |= PS_NULLFUNC_COMPLETED;
}
/*
diff --git a/drivers/net/wireless/ath/debug.h b/drivers/net/wireless/ath/debug.h
index d6b685a06c5e..8263633c003c 100644
--- a/drivers/net/wireless/ath/debug.h
+++ b/drivers/net/wireless/ath/debug.h
@@ -65,11 +65,11 @@ enum ATH_DEBUG {
#define ATH_DBG_DEFAULT (ATH_DBG_FATAL)
#ifdef CONFIG_ATH_DEBUG
-void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...);
+void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
#else
-static inline void ath_print(struct ath_common *common,
- int dbg_mask,
- const char *fmt, ...)
+static inline void __attribute__ ((format (printf, 3, 4)))
+ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
{
}
#endif /* CONFIG_ATH_DEBUG */
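The hunk above adds a printf-style format attribute to ath_print() in both the CONFIG_ATH_DEBUG and stub variants, so the compiler can type-check the variadic arguments. A hypothetical example of the class of bug this catches (the call itself is illustrative, not from this patch; the %x to %lx conversions elsewhere in this merge suggest ps_flags is an unsigned long):

	/* gcc now warns: format '%x' expects 'unsigned int', but the
	 * argument has type 'long unsigned int'. */
	ath_print(common, ATH_DBG_PS, "flags 0x%x\n", sc->ps_flags);

	/* Correct: */
	ath_print(common, ATH_DBG_PS, "flags 0x%lx\n", sc->ps_flags);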
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
index 92f87fbe750f..9ab1192004c0 100644
--- a/drivers/net/wireless/atmel_pci.c
+++ b/drivers/net/wireless/atmel_pci.c
@@ -31,7 +31,7 @@ MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.")
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards");
-static struct pci_device_id card_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
{ 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID },
{ 0, }
};
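Several hunks in this merge convert open-coded struct pci_device_id arrays to DEFINE_PCI_DEVICE_TABLE(). For kernels of this vintage the macro expands to roughly the following (shown for orientation only; see include/linux/pci.h for the authoritative definition):

	#define DEFINE_PCI_DEVICE_TABLE(_table) \
		const struct pci_device_id _table[] __devinitconst

so the ID table becomes const and is placed in the devinit-const section rather than being tagged __devinitdata by hand.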
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 2f12a750bc98..54d6085a887b 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -253,6 +253,14 @@ enum {
#define B43_SHM_SH_MAXBFRAMES 0x0080 /* Maximum number of frames in a burst */
#define B43_SHM_SH_SPUWKUP 0x0094 /* pre-wakeup for synth PU in us */
#define B43_SHM_SH_PRETBTT 0x0096 /* pre-TBTT in us */
+/* SHM_SHARED tx iq workarounds */
+#define B43_SHM_SH_NPHY_TXIQW0 0x0700
+#define B43_SHM_SH_NPHY_TXIQW1 0x0702
+#define B43_SHM_SH_NPHY_TXIQW2 0x0704
+#define B43_SHM_SH_NPHY_TXIQW3 0x0706
+/* SHM_SHARED tx pwr ctrl */
+#define B43_SHM_SH_NPHY_TXPWR_INDX0 0x0708
+#define B43_SHM_SH_NPHY_TXPWR_INDX1 0x070E
/* SHM_SCRATCH offsets */
#define B43_SHM_SC_MINCONT 0x0003 /* Minimum contention window */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 60290c06e950..316a913860d7 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -67,7 +67,12 @@ MODULE_AUTHOR("Gábor Stefanik");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(B43_SUPPORTED_FIRMWARE_ID);
-
+MODULE_FIRMWARE("b43/ucode11.fw");
+MODULE_FIRMWARE("b43/ucode13.fw");
+MODULE_FIRMWARE("b43/ucode14.fw");
+MODULE_FIRMWARE("b43/ucode15.fw");
+MODULE_FIRMWARE("b43/ucode5.fw");
+MODULE_FIRMWARE("b43/ucode9.fw");
static int modparam_bad_frames_preempt;
module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444);
@@ -113,6 +118,7 @@ static const struct ssb_device_id b43_ssb_tbl[] = {
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 9),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 10),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 11),
+ SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 12),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 13),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 15),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 16),
@@ -838,8 +844,10 @@ static void rx_tkip_phase1_write(struct b43_wldev *dev, u8 index, u32 iv32,
}
static void b43_op_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_key_conf *keyconf, const u8 *addr,
- u32 iv32, u16 *phase1key)
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev;
@@ -848,19 +856,19 @@ static void b43_op_update_tkip_key(struct ieee80211_hw *hw,
if (B43_WARN_ON(!modparam_hwtkip))
return;
- mutex_lock(&wl->mutex);
-
+ /* This is only called from the RX path through mac80211, where
+ * our mutex is already locked. */
+ B43_WARN_ON(!mutex_is_locked(&wl->mutex));
dev = wl->current_dev;
- if (!dev || b43_status(dev) < B43_STAT_INITIALIZED)
- goto out_unlock;
+ B43_WARN_ON(!dev || b43_status(dev) < B43_STAT_INITIALIZED);
keymac_write(dev, index, NULL); /* First zero out mac to avoid race */
rx_tkip_phase1_write(dev, index, iv32, phase1key);
- keymac_write(dev, index, addr);
-
-out_unlock:
- mutex_unlock(&wl->mutex);
+ /* only pairwise TKIP keys are supported right now */
+ if (WARN_ON(!sta))
+ return;
+ keymac_write(dev, index, sta->addr);
}
static void do_key_write(struct b43_wldev *dev,
@@ -3565,6 +3573,12 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
dev = wl->current_dev;
phy = &dev->phy;
+ if (conf_is_ht(conf))
+ phy->is_40mhz =
+ (conf_is_ht40_minus(conf) || conf_is_ht40_plus(conf));
+ else
+ phy->is_40mhz = false;
+
b43_mac_suspend(dev);
if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 75b26e175e8f..8f7d7eff2d80 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -421,3 +421,48 @@ void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on)
{
b43_write16(dev, B43_MMIO_PHY0, on ? 0 : 0xF4);
}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/Cordic */
+struct b43_c32 b43_cordic(int theta)
+{
+ u32 arctg[] = { 2949120, 1740967, 919879, 466945, 234379, 117304,
+ 58666, 29335, 14668, 7334, 3667, 1833, 917, 458,
+ 229, 115, 57, 29, };
+ u8 i;
+ s32 tmp;
+ s8 signx = 1;
+ u32 angle = 0;
+ struct b43_c32 ret = { .i = 39797, .q = 0, };
+
+ while (theta > (180 << 16))
+ theta -= (360 << 16);
+ while (theta < -(180 << 16))
+ theta += (360 << 16);
+
+ if (theta > (90 << 16)) {
+ theta -= (180 << 16);
+ signx = -1;
+ } else if (theta < -(90 << 16)) {
+ theta += (180 << 16);
+ signx = -1;
+ }
+
+ for (i = 0; i <= 17; i++) {
+ if (theta > angle) {
+ tmp = ret.i - (ret.q >> i);
+ ret.q += ret.i >> i;
+ ret.i = tmp;
+ angle += arctg[i];
+ } else {
+ tmp = ret.i + (ret.q >> i);
+ ret.q -= ret.i >> i;
+ ret.i = tmp;
+ angle -= arctg[i];
+ }
+ }
+
+ ret.i *= signx;
+ ret.q *= signx;
+
+ return ret;
+}
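The CORDIC helper moved here takes its angle in degrees as Q16.16 fixed point (theta scaled by 2^16); the start vector 39797 is approximately 0.60725 * 2^16, which pre-compensates the CORDIC gain of about 1.6468 so the returned i/q pair approximates a unit vector in Q16 format. A minimal usage sketch (the 30-degree call and expected values are illustrative, not taken from the patch):

	/* cos/sin of 30 degrees; angles are degrees << 16. */
	struct b43_c32 v = b43_cordic(30 << 16);
	s32 cos_q16 = v.i;	/* roughly 56756 == cos(30 deg) * 65536 */
	s32 sin_q16 = v.q;	/* roughly 32768 == sin(30 deg) * 65536 */

The CORDIC_CONVERT() macro added to phy_common.h below rounds such Q16 values back to integers.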
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index 9edd4e8e0c85..bd480b481bfc 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -5,6 +5,12 @@
struct b43_wldev;
+/* Complex number using 2 32-bit signed integers */
+struct b43_c32 { s32 i, q; };
+
+#define CORDIC_CONVERT(value) (((value) >= 0) ? \
+ ((((value) >> 15) + 1) >> 1) : \
+ -((((-(value)) >> 15) + 1) >> 1))
/* PHY register routing bits */
#define B43_PHYROUTE 0x0C00 /* PHY register routing bits mask */
@@ -212,6 +218,9 @@ struct b43_phy {
bool supports_2ghz;
bool supports_5ghz;
+ /* HT info */
+ bool is_40mhz;
+
/* GMODE bit enabled? */
bool gmode;
@@ -418,5 +427,6 @@ int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset);
*/
void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on);
+struct b43_c32 b43_cordic(int theta);
#endif /* LINUX_B43_PHY_COMMON_H_ */
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index 3e046ec1ff86..185219e0a552 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -80,6 +80,7 @@ static void b43_lpphy_op_free(struct b43_wldev *dev)
dev->phy.lp = NULL;
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */
static void lpphy_read_band_sprom(struct b43_wldev *dev)
{
struct b43_phy_lp *lpphy = dev->phy.lp;
@@ -101,6 +102,12 @@ static void lpphy_read_band_sprom(struct b43_wldev *dev)
maxpwr = bus->sprom.maxpwr_bg;
lpphy->max_tx_pwr_med_band = maxpwr;
cckpo = bus->sprom.cck2gpo;
+ /*
+ * We don't read the SPROM's opo as the specs say. On rev8 SPROMs
+ * opo == ofdm2gpo, and we don't know of any SSB with an LP-PHY
+ * and a SPROM rev below 8.
+ */
+ B43_WARN_ON(bus->sprom.revision < 8);
ofdmpo = bus->sprom.ofdm2gpo;
if (cckpo) {
for (i = 0; i < 4; i++) {
@@ -1703,19 +1710,6 @@ static const struct lpphy_rx_iq_comp lpphy_rev2plus_iq_comp = {
.c0 = 0,
};
-static u8 lpphy_nbits(s32 val)
-{
- u32 tmp = abs(val);
- u8 nbits = 0;
-
- while (tmp != 0) {
- nbits++;
- tmp >>= 1;
- }
-
- return nbits;
-}
-
static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples)
{
struct lpphy_iq_est iq_est;
@@ -1742,8 +1736,8 @@ static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples)
goto out;
}
- prod_msb = lpphy_nbits(prod);
- q_msb = lpphy_nbits(qpwr);
+ prod_msb = fls(abs(prod));
+ q_msb = fls(abs(qpwr));
tmp1 = prod_msb - 20;
if (tmp1 >= 0) {
@@ -1773,47 +1767,6 @@ out:
return ret;
}
-/* Complex number using 2 32-bit signed integers */
-typedef struct {s32 i, q;} lpphy_c32;
-
-static lpphy_c32 lpphy_cordic(int theta)
-{
- u32 arctg[] = { 2949120, 1740967, 919879, 466945, 234379, 117304,
- 58666, 29335, 14668, 7334, 3667, 1833, 917, 458,
- 229, 115, 57, 29, };
- int i, tmp, signx = 1, angle = 0;
- lpphy_c32 ret = { .i = 39797, .q = 0, };
-
- theta = clamp_t(int, theta, -180, 180);
-
- if (theta > 90) {
- theta -= 180;
- signx = -1;
- } else if (theta < -90) {
- theta += 180;
- signx = -1;
- }
-
- for (i = 0; i <= 17; i++) {
- if (theta > angle) {
- tmp = ret.i - (ret.q >> i);
- ret.q += ret.i >> i;
- ret.i = tmp;
- angle += arctg[i];
- } else {
- tmp = ret.i + (ret.q >> i);
- ret.q -= ret.i >> i;
- ret.i = tmp;
- angle -= arctg[i];
- }
- }
-
- ret.i *= signx;
- ret.q *= signx;
-
- return ret;
-}
-
static void lpphy_run_samples(struct b43_wldev *dev, u16 samples, u16 loops,
u16 wait)
{
@@ -1831,8 +1784,9 @@ static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max)
{
struct b43_phy_lp *lpphy = dev->phy.lp;
u16 buf[64];
- int i, samples = 0, angle = 0, rotation = (9 * freq) / 500;
- lpphy_c32 sample;
+ int i, samples = 0, angle = 0;
+ int rotation = (((36 * freq) / 20) << 16) / 100;
+ struct b43_c32 sample;
lpphy->tx_tone_freq = freq;
@@ -1848,10 +1802,10 @@ static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max)
}
for (i = 0; i < samples; i++) {
- sample = lpphy_cordic(angle);
+ sample = b43_cordic(angle);
angle += rotation;
- buf[i] = ((sample.i * max) & 0xFF) << 8;
- buf[i] |= (sample.q * max) & 0xFF;
+ buf[i] = CORDIC_CONVERT((sample.i * max) & 0xFF) << 8;
+ buf[i] |= CORDIC_CONVERT((sample.q * max) & 0xFF);
}
b43_lptab_write_bulk(dev, B43_LPTAB16(5, 0), samples, buf);
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 992318a78077..6392da25efed 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -28,7 +28,46 @@
#include "b43.h"
#include "phy_n.h"
#include "tables_nphy.h"
+#include "main.h"
+struct nphy_txgains {
+ u16 txgm[2];
+ u16 pga[2];
+ u16 pad[2];
+ u16 ipa[2];
+};
+
+struct nphy_iqcal_params {
+ u16 txgm;
+ u16 pga;
+ u16 pad;
+ u16 ipa;
+ u16 cal_gain;
+ u16 ncorr[5];
+};
+
+struct nphy_iq_est {
+ s32 iq0_prod;
+ u32 i0_pwr;
+ u32 q0_pwr;
+ s32 iq1_prod;
+ u32 i1_pwr;
+ u32 q1_pwr;
+};
+
+enum b43_nphy_rf_sequence {
+ B43_RFSEQ_RX2TX,
+ B43_RFSEQ_TX2RX,
+ B43_RFSEQ_RESET2RX,
+ B43_RFSEQ_UPDATE_GAINH,
+ B43_RFSEQ_UPDATE_GAINL,
+ B43_RFSEQ_UPDATE_GAINU,
+};
+
+static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
+ u8 *events, u8 *delays, u8 length);
+static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
+ enum b43_nphy_rf_sequence seq);
void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
{//TODO
@@ -197,173 +236,929 @@ void b43_nphy_radio_turn_off(struct b43_wldev *dev)
~B43_NPHY_RFCTL_CMD_EN);
}
-#define ntab_upload(dev, offset, data) do { \
- unsigned int i; \
- for (i = 0; i < (offset##_SIZE); i++) \
- b43_ntab_write(dev, (offset) + i, (data)[i]); \
- } while (0)
-
-/* Upload the N-PHY tables. */
+/*
+ * Upload the N-PHY tables.
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables
+ */
static void b43_nphy_tables_init(struct b43_wldev *dev)
{
- /* Static tables */
- ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct);
- ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup);
- ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap);
- ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
- ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
- ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
- ntab_upload(dev, B43_NTAB_PILOTLT, b43_ntab_pilotlt);
- ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
- ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
- ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
- ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
- ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
- ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
- ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
-
- /* Volatile tables */
- ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
- ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
- ntab_upload(dev, B43_NTAB_C0_ESTPLT, b43_ntab_estimatepowerlt0);
- ntab_upload(dev, B43_NTAB_C1_ESTPLT, b43_ntab_estimatepowerlt1);
- ntab_upload(dev, B43_NTAB_C0_ADJPLT, b43_ntab_adjustpower0);
- ntab_upload(dev, B43_NTAB_C1_ADJPLT, b43_ntab_adjustpower1);
- ntab_upload(dev, B43_NTAB_C0_GAINCTL, b43_ntab_gainctl0);
- ntab_upload(dev, B43_NTAB_C1_GAINCTL, b43_ntab_gainctl1);
- ntab_upload(dev, B43_NTAB_C0_IQLT, b43_ntab_iqlt0);
- ntab_upload(dev, B43_NTAB_C1_IQLT, b43_ntab_iqlt1);
- ntab_upload(dev, B43_NTAB_C0_LOFEEDTH, b43_ntab_loftlt0);
- ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
+ if (dev->phy.rev < 3)
+ b43_nphy_rev0_1_2_tables_init(dev);
+ else
+ b43_nphy_rev3plus_tables_init(dev);
}
-static void b43_nphy_workarounds(struct b43_wldev *dev)
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
+static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ enum ieee80211_band band;
+ u16 tmp;
+
+ if (!enable) {
+ nphy->rfctrl_intc1_save = b43_phy_read(dev,
+ B43_NPHY_RFCTL_INTC1);
+ nphy->rfctrl_intc2_save = b43_phy_read(dev,
+ B43_NPHY_RFCTL_INTC2);
+ band = b43_current_band(dev->wl);
+ if (dev->phy.rev >= 3) {
+ if (band == IEEE80211_BAND_5GHZ)
+ tmp = 0x600;
+ else
+ tmp = 0x480;
+ } else {
+ if (band == IEEE80211_BAND_5GHZ)
+ tmp = 0x180;
+ else
+ tmp = 0x120;
+ }
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
+ } else {
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1,
+ nphy->rfctrl_intc1_save);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2,
+ nphy->rfctrl_intc2_save);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */
+static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u16 tmp;
+ enum ieee80211_band band = b43_current_band(dev->wl);
+ bool ipa = (nphy->ipa2g_on && band == IEEE80211_BAND_2GHZ) ||
+ (nphy->ipa5g_on && band == IEEE80211_BAND_5GHZ);
+
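+ /* The 3-bit code in tmp is replicated into four fields of the
+ * TXF_40CO registers written below. */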
+ if (dev->phy.rev >= 3) {
+ if (ipa) {
+ tmp = 4;
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2,
+ (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
+ }
+
+ tmp = 1;
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2,
+ (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BmacPhyClkFgc */
+static void b43_nphy_bmac_clock_fgc(struct b43_wldev *dev, bool force)
+{
+ u32 tmslow;
+
+ if (dev->phy.type != B43_PHYTYPE_N)
+ return;
+
+ tmslow = ssb_read32(dev->dev, SSB_TMSLOW);
+ if (force)
+ tmslow |= SSB_TMSLOW_FGC;
+ else
+ tmslow &= ~SSB_TMSLOW_FGC;
+ ssb_write32(dev->dev, SSB_TMSLOW, tmslow);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
+static void b43_nphy_reset_cca(struct b43_wldev *dev)
+{
+ u16 bbcfg;
+
+ b43_nphy_bmac_clock_fgc(dev, 1);
+ bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG);
+ b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA);
+ udelay(1);
+ b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA);
+ b43_nphy_bmac_clock_fgc(dev, 0);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */
+static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble)
+{
+ u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG);
+
+ mimocfg |= B43_NPHY_MIMOCFG_AUTO;
+ if (preamble == 1)
+ mimocfg |= B43_NPHY_MIMOCFG_GFMIX;
+ else
+ mimocfg &= ~B43_NPHY_MIMOCFG_GFMIX;
+
+ b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */
+static void b43_nphy_update_txrx_chain(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ bool override = false;
+ u16 chain = 0x33;
+
+ if (nphy->txrx_chain == 0) {
+ chain = 0x11;
+ override = true;
+ } else if (nphy->txrx_chain == 1) {
+ chain = 0x22;
+ override = true;
+ }
+
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
+ ~(B43_NPHY_RFSEQCA_TXEN | B43_NPHY_RFSEQCA_RXEN),
+ chain);
+
+ if (override)
+ b43_phy_set(dev, B43_NPHY_RFSEQMODE,
+ B43_NPHY_RFSEQMODE_CAOVER);
+ else
+ b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
+ ~B43_NPHY_RFSEQMODE_CAOVER);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
+static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
+ u16 samps, u8 time, bool wait)
+{
+ int i;
+ u16 tmp;
+
+ b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps);
+ b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time);
+ if (wait)
+ b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE);
+ else
+ b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE);
+
+ b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START);
+
+ for (i = 1000; i; i--) {
+ tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD);
+ if (!(tmp & B43_NPHY_IQEST_CMD_START)) {
+ est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0);
+ est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0);
+ est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0);
+
+ est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1);
+ est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1);
+ est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1);
+ return;
+ }
+ udelay(10);
+ }
+ memset(est, 0, sizeof(*est));
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
+static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
+ struct b43_phy_n_iq_comp *pcomp)
+{
+ if (write) {
+ b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0);
+ b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0);
+ b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1);
+ b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1);
+ } else {
+ pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0);
+ pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0);
+ pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1);
+ pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
+static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
+{
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+
+ b43_phy_write(dev, B43_NPHY_RFSEQCA, regs[0]);
+ if (core == 0) {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
+ } else {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
+ }
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[3]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[4]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, regs[5]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, regs[6]);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, regs[7]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_OVER, regs[8]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */
+static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
+{
+ u8 rxval, txval;
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+
+ regs[0] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
+ if (core == 0) {
+ regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
+ regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
+ } else {
+ regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
+ regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ }
+ regs[3] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+ regs[4] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+ regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1);
+ regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2);
+ regs[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S1);
+ regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER);
+ regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
+ regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
+
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
+
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, (u16)~B43_NPHY_RFSEQCA_RXDIS,
+ ((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
+ ((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN,
+ (core << B43_NPHY_RFSEQCA_RXEN_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXDIS,
+ (core << B43_NPHY_RFSEQCA_TXDIS_SHIFT));
+
+ if (core == 0) {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x0007);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0007);
+ } else {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x0007);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007);
+ }
+
+ /* TODO: Call N PHY RF Ctrl Intc Override with 2, 0, 3 as arguments */
+ /* TODO: Call N PHY RF Intc Override with 8, 0, 3, 0 as arguments */
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
+
+ if (core == 0) {
+ rxval = 1;
+ txval = 8;
+ } else {
+ rxval = 4;
+ txval = 2;
+ }
+
+ /* TODO: Call N PHY RF Ctrl Intc Override with 1, rxval, (core + 1) */
+ /* TODO: Call N PHY RF Ctrl Intc Override with 1, txval, (2 - core) */
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
+static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
+{
+ int i;
+ s32 iq;
+ u32 ii;
+ u32 qq;
+ int iq_nbits, qq_nbits;
+ int arsh, brsh;
+ u16 tmp, a, b;
+
+ struct nphy_iq_est est;
+ struct b43_phy_n_iq_comp old;
+ struct b43_phy_n_iq_comp new = { };
+ bool error = false;
+
+ if (mask == 0)
+ return;
+
+ b43_nphy_rx_iq_coeffs(dev, false, &old);
+ b43_nphy_rx_iq_coeffs(dev, true, &new);
+ b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false);
+ new = old;
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0 && (mask & 1)) {
+ iq = est.iq0_prod;
+ ii = est.i0_pwr;
+ qq = est.q0_pwr;
+ } else if (i == 1 && (mask & 2)) {
+ iq = est.iq1_prod;
+ ii = est.i1_pwr;
+ qq = est.q1_pwr;
+ } else {
+ B43_WARN_ON(1);
+ continue;
+ }
+
+ if (ii + qq < 2) {
+ error = true;
+ break;
+ }
+
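+ /* Derive the compensation pair: a approximates -(iq/ii) and b
+ * approximates sqrt(qq/ii - a^2) - 1, both in 10-bit fixed point;
+ * the fls()-based shifts keep the intermediates within 32 bits. */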
+ iq_nbits = fls(abs(iq));
+ qq_nbits = fls(qq);
+
+ arsh = iq_nbits - 20;
+ if (arsh >= 0) {
+ a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
+ tmp = ii >> arsh;
+ } else {
+ a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
+ tmp = ii << -arsh;
+ }
+ if (tmp == 0) {
+ error = true;
+ break;
+ }
+ a /= tmp;
+
+ brsh = qq_nbits - 11;
+ if (brsh >= 0) {
+ b = (qq << (31 - qq_nbits));
+ tmp = ii >> brsh;
+ } else {
+ b = (qq << (31 - qq_nbits));
+ tmp = ii << -brsh;
+ }
+ if (tmp == 0) {
+ error = true;
+ break;
+ }
+ b = int_sqrt(b / tmp - a * a) - (1 << 10);
+
+ if (i == 0 && (mask & 0x1)) {
+ if (dev->phy.rev >= 3) {
+ new.a0 = a & 0x3FF;
+ new.b0 = b & 0x3FF;
+ } else {
+ new.a0 = b & 0x3FF;
+ new.b0 = a & 0x3FF;
+ }
+ } else if (i == 1 && (mask & 0x2)) {
+ if (dev->phy.rev >= 3) {
+ new.a1 = a & 0x3FF;
+ new.b1 = b & 0x3FF;
+ } else {
+ new.a1 = b & 0x3FF;
+ new.b1 = a & 0x3FF;
+ }
+ }
+ }
+
+ if (error)
+ new = old;
+
+ b43_nphy_rx_iq_coeffs(dev, true, &new);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
+static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
+{
+ u16 array[4];
+ int i;
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x3C50);
+ for (i = 0; i < 4; i++)
+ array[i] = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
+static void b43_nphy_write_clip_detection(struct b43_wldev *dev, u16 *clip_st)
+{
+ b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]);
+ b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
+static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
+{
+ clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES);
+ clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
+static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
+{
+ u16 tmp;
+
+ if (dev->dev->id.revision == 16)
+ b43_mac_suspend(dev);
+
+ tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL);
+ tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN |
+ B43_NPHY_CLASSCTL_WAITEDEN);
+ tmp &= ~mask;
+ tmp |= (val & mask);
+ b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp);
+
+ if (dev->dev->id.revision == 16)
+ b43_mac_enable(dev);
+
+ return tmp;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */
+static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
{
struct b43_phy *phy = &dev->phy;
- unsigned int i;
+ struct b43_phy_n *nphy = phy->n;
- b43_phy_set(dev, B43_NPHY_IQFLIP,
- B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
- if (1 /* FIXME band is 2.4GHz */) {
- b43_phy_set(dev, B43_NPHY_CLASSCTL,
- B43_NPHY_CLASSCTL_CCKEN);
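+ /* deaf_count reference-counts nested requests so the classifier and
+ * clip thresholds are only saved/restored on the outermost call. */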
+ if (enable) {
+ u16 clip[] = { 0xFFFF, 0xFFFF };
+ if (nphy->deaf_count++ == 0) {
+ nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
+ b43_nphy_classifier(dev, 0x7, 0);
+ b43_nphy_read_clip_detection(dev, nphy->clip_state);
+ b43_nphy_write_clip_detection(dev, clip);
+ }
+ b43_nphy_reset_cca(dev);
+ } else {
+ if (--nphy->deaf_count == 0) {
+ b43_nphy_classifier(dev, 0x7, nphy->classifier_state);
+ b43_nphy_write_clip_detection(dev, nphy->clip_state);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */
+static void b43_nphy_stop_playback(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u16 tmp;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ tmp = b43_phy_read(dev, B43_NPHY_SAMP_STAT);
+ if (tmp & 0x1)
+ b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP);
+ else if (tmp & 0x2)
+ b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, (u16)~0x8000);
+
+ b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004);
+
+ if (nphy->bb_mult_save & 0x80000000) {
+ tmp = nphy->bb_mult_save & 0xFFFF;
+ b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
+ nphy->bb_mult_save = 0;
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
+static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i, j;
+ u8 code;
+
+ /* TODO: for PHY >= 3
+ s8 *lna1_gain, *lna2_gain;
+ u8 *gain_db, *gain_bits;
+ u16 *rfseq_init;
+ u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 };
+ u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 };
+ */
+
+ u8 rfseq_events[3] = { 6, 8, 7 };
+ u8 rfseq_delays[3] = { 10, 30, 1 };
+
+ if (dev->phy.rev >= 3) {
+ /* TODO */
} else {
- b43_phy_mask(dev, B43_NPHY_CLASSCTL,
- ~B43_NPHY_CLASSCTL_CCKEN);
- }
- b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
- b43_phy_write(dev, B43_NPHY_TXFRAMEDELAY, 8);
-
- /* Fixup some tables */
- b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0xA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0xA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0);
- b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0);
- b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB);
- b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB);
- b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x800);
- b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x800);
-
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
-
- //TODO set RF sequence
-
- /* Set narrowband clip threshold */
- b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 66);
- b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 66);
-
- /* Set wideband clip 2 threshold */
- b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
- ~B43_NPHY_C1_CLIPWBTHRES_CLIP2,
- 21 << B43_NPHY_C1_CLIPWBTHRES_CLIP2_SHIFT);
- b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
- ~B43_NPHY_C2_CLIPWBTHRES_CLIP2,
- 21 << B43_NPHY_C2_CLIPWBTHRES_CLIP2_SHIFT);
-
- /* Set Clip 2 detect */
- b43_phy_set(dev, B43_NPHY_C1_CGAINI,
- B43_NPHY_C1_CGAINI_CL2DETECT);
- b43_phy_set(dev, B43_NPHY_C2_CGAINI,
- B43_NPHY_C2_CGAINI_CL2DETECT);
-
- if (0 /*FIXME*/) {
- /* Set dwell lengths */
- b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 43);
- b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 43);
- b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 9);
- b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 9);
-
- /* Set gain backoff */
- b43_phy_maskset(dev, B43_NPHY_C1_CGAINI,
- ~B43_NPHY_C1_CGAINI_GAINBKOFF,
- 1 << B43_NPHY_C1_CGAINI_GAINBKOFF_SHIFT);
- b43_phy_maskset(dev, B43_NPHY_C2_CGAINI,
- ~B43_NPHY_C2_CGAINI_GAINBKOFF,
- 1 << B43_NPHY_C2_CGAINI_GAINBKOFF_SHIFT);
+ /* Set Clip 2 detect */
+ b43_phy_set(dev, B43_NPHY_C1_CGAINI,
+ B43_NPHY_C1_CGAINI_CL2DETECT);
+ b43_phy_set(dev, B43_NPHY_C2_CGAINI,
+ B43_NPHY_C2_CGAINI_CL2DETECT);
+
+ /* Set narrowband clip threshold */
+ b43_phy_set(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84);
+ b43_phy_set(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84);
+
+ if (!dev->phy.is_40mhz) {
+ /* Set dwell lengths */
+ b43_phy_set(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B);
+ b43_phy_set(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B);
+ b43_phy_set(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009);
+ b43_phy_set(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009);
+ }
+
+ /* Set wideband clip 2 threshold */
+ b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
+ ~B43_NPHY_C1_CLIPWBTHRES_CLIP2,
+ 21);
+ b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
+ ~B43_NPHY_C2_CLIPWBTHRES_CLIP2,
+ 21);
+
+ if (!dev->phy.is_40mhz) {
+ b43_phy_maskset(dev, B43_NPHY_C1_CGAINI,
+ ~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C2_CGAINI,
+ ~B43_NPHY_C2_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C1_CCK_CGAINI,
+ ~B43_NPHY_C1_CCK_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C2_CCK_CGAINI,
+ ~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1);
+ }
+
+ b43_phy_set(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
+
+ if (nphy->gain_boost) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
+ dev->phy.is_40mhz)
+ code = 4;
+ else
+ code = 5;
+ } else {
+ code = dev->phy.is_40mhz ? 6 : 7;
+ }
/* Set HPVGA2 index */
b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN,
~B43_NPHY_C1_INITGAIN_HPVGA2,
- 6 << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT);
+ code << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT);
b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN,
~B43_NPHY_C2_INITGAIN_HPVGA2,
- 6 << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
+ code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x7C));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x7C));
+
+ /* TODO: b43_nphy_adjust_lna_gain_table(dev); */
+
+ if (nphy->elna_gain_config) {
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0C08);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x74));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x74));
+ }
- //FIXME verify that the specs really mean to use autoinc here.
- for (i = 0; i < 3; i++)
- b43_ntab_write(dev, B43_NTAB16(7, 0x106) + i, 0x673);
+ if (dev->phy.rev == 2) {
+ for (i = 0; i < 4; i++) {
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (0x0400 * i) + 0x0020);
+ for (j = 0; j < 21; j++)
+ b43_phy_write(dev,
+ B43_NPHY_TABLE_DATALO, 3 * j);
+ }
+
+ b43_nphy_set_rf_sequence(dev, 5,
+ rfseq_events, rfseq_delays, 3);
+ b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
+ (u16)~B43_NPHY_OVER_DGAIN_CCKDGECV,
+ 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ b43_phy_maskset(dev, B43_PHY_N(0xC5D),
+ 0xFF80, 4);
+ }
}
+}
- /* Set minimum gain value */
- b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN,
- ~B43_NPHY_C1_MINGAIN,
- 23 << B43_NPHY_C1_MINGAIN_SHIFT);
- b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN,
- ~B43_NPHY_C2_MINGAIN,
- 23 << B43_NPHY_C2_MINGAIN_SHIFT);
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
+static void b43_nphy_workarounds(struct b43_wldev *dev)
+{
+ struct ssb_bus *bus = dev->dev->bus;
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = phy->n;
- if (phy->rev < 2) {
- b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
- ~B43_NPHY_SCRAM_SIGCTL_SCM);
+ u8 events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 };
+ u8 delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 };
+
+ u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
+ u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ b43_nphy_classifier(dev, 1, 0);
+ else
+ b43_nphy_classifier(dev, 1, 1);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ b43_phy_set(dev, B43_NPHY_IQFLIP,
+ B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
+
+ if (dev->phy.rev >= 3) {
+ /* TODO */
+ } else {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
+ nphy->band5g_pwrgain) {
+ b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
+ b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
+ } else {
+ b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
+ b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8);
+ }
+
+ /* TODO: convert to b43_ntab_write? */
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2000);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x000A);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2010);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x000A);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2002);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0xCDAA);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2012);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0xCDAA);
+
+ if (dev->phy.rev < 2) {
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2008);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0000);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2018);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0000);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2007);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x7AAB);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2017);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x7AAB);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2006);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0800);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2016);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0800);
+ }
+
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
+
+ if (bus->sprom.boardflags2_lo & 0x100 &&
+ bus->boardinfo.type == 0x8B) {
+ delays1[0] = 0x1;
+ delays1[5] = 0x14;
+ }
+ b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
+ b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
+
+ b43_nphy_gain_ctrl_workarounds(dev);
+
+ if (dev->phy.rev < 2) {
+ if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2)
+ ; /*TODO: b43_mhf(dev, 2, 0x0010, 0x0010, 3);*/
+ } else if (dev->phy.rev == 2) {
+ b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0);
+ b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0);
+ }
+
+ if (dev->phy.rev < 2)
+ b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
+ ~B43_NPHY_SCRAM_SIGCTL_SCM);
+
+ /* Set phase track alpha and beta */
+ b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
+
+ b43_phy_mask(dev, B43_NPHY_PIL_DW1,
+ (u16)~B43_NPHY_PIL_DW_64QAM);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
+
+ if (dev->phy.rev == 2)
+ b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
+ B43_NPHY_FINERX2_CGC_DECGC);
}
- /* Set phase track alpha and beta */
- b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
- b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
- b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
- b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
- b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
- b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
}
-static void b43_nphy_reset_cca(struct b43_wldev *dev)
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GenLoadSamples */
+static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
+ bool test)
{
- u16 bbcfg;
+ int i;
+ u16 bw, len, rot, angle;
+ struct b43_c32 *samples;
- ssb_write32(dev->dev, SSB_TMSLOW,
- ssb_read32(dev->dev, SSB_TMSLOW) | SSB_TMSLOW_FGC);
- bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG);
- b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTCCA);
- b43_phy_write(dev, B43_NPHY_BBCFG,
- bbcfg & ~B43_NPHY_BBCFG_RSTCCA);
- ssb_write32(dev->dev, SSB_TMSLOW,
- ssb_read32(dev->dev, SSB_TMSLOW) & ~SSB_TMSLOW_FGC);
+
+ bw = (dev->phy.is_40mhz) ? 40 : 20;
+ len = bw << 3;
+
+ if (test) {
+ if (b43_phy_read(dev, B43_NPHY_BBCFG) & B43_NPHY_BBCFG_RSTRX)
+ bw = 82;
+ else
+ bw = 80;
+
+ if (dev->phy.is_40mhz)
+ bw <<= 1;
+
+ len = bw << 1;
+ }
+
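+ /* Build one period of the requested tone: rot is the per-sample
+ * CORDIC phase step for freq and each sample is scaled by max. */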
+ samples = kzalloc(len * sizeof(struct b43_c32), GFP_KERNEL);
+ if (!samples)
+ return 0; /* caller treats zero samples as an error */
+ rot = (((freq * 36) / bw) << 16) / 100;
+ angle = 0;
+
+ for (i = 0; i < len; i++) {
+ samples[i] = b43_cordic(angle);
+ angle += rot;
+ samples[i].q = CORDIC_CONVERT(samples[i].q * max);
+ samples[i].i = CORDIC_CONVERT(samples[i].i * max);
+ }
+
+ /* TODO: Call N PHY Load Sample Table with buffer, len as arguments */
+ kfree(samples);
+ return len;
}
-enum b43_nphy_rf_sequence {
- B43_RFSEQ_RX2TX,
- B43_RFSEQ_TX2RX,
- B43_RFSEQ_RESET2RX,
- B43_RFSEQ_UPDATE_GAINH,
- B43_RFSEQ_UPDATE_GAINL,
- B43_RFSEQ_UPDATE_GAINU,
-};
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RunSamples */
+static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
+ u16 wait, bool iqmode, bool dac_test)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i;
+ u16 seq_mode;
+ u32 tmp;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+
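+ /* Bit 31 of bb_mult_save flags a cached BB multiplier: save the
+ * current table value once, then force a fixed multiplier while the
+ * samples are played back. */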
+ if ((nphy->bb_mult_save & 0x80000000) == 0) {
+ tmp = b43_ntab_read(dev, B43_NTAB16(15, 87));
+ nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000;
+ }
+ if (!dev->phy.is_40mhz)
+ tmp = 0x6464;
+ else
+ tmp = 0x4747;
+ b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+
+ b43_phy_write(dev, B43_NPHY_SAMP_DEPCNT, (samps - 1));
+
+ if (loops != 0xFFFF)
+ b43_phy_write(dev, B43_NPHY_SAMP_LOOPCNT, (loops - 1));
+ else
+ b43_phy_write(dev, B43_NPHY_SAMP_LOOPCNT, loops);
+
+ b43_phy_write(dev, B43_NPHY_SAMP_WAITCNT, wait);
+
+ seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE);
+
+ b43_phy_set(dev, B43_NPHY_RFSEQMODE, B43_NPHY_RFSEQMODE_CAOVER);
+ if (iqmode) {
+ b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF);
+ b43_phy_set(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8000);
+ } else {
+ if (dac_test)
+ b43_phy_write(dev, B43_NPHY_SAMP_CMD, 5);
+ else
+ b43_phy_write(dev, B43_NPHY_SAMP_CMD, 1);
+ }
+ for (i = 0; i < 100; i++) {
+ if (b43_phy_read(dev, B43_NPHY_RFSEQST) & 1) {
+ i = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (i)
+ b43err(dev->wl, "run samples timeout\n");
+
+ b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
+}
+
+/*
+ * Transmits a known value for LO calibration
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone
+ */
+static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val,
+ bool iqmode, bool dac_test)
+{
+ u16 samp = b43_nphy_gen_load_samples(dev, freq, max_val, dac_test);
+ if (samp == 0)
+ return -1;
+ b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test);
+ return 0;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
+static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i, j;
+ u32 tmp;
+ u32 cur_real, cur_imag, real_part, imag_part;
+
+ u16 buffer[7];
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
+
+ for (i = 0; i < 2; i++) {
+ tmp = ((buffer[i * 2] & 0x3FF) << 10) |
+ (buffer[i * 2 + 1] & 0x3FF);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (((i + 26) << 10) | 320));
+ for (j = 0; j < 128; j++) {
+ b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
+ ((tmp >> 16) & 0xFFFF));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (tmp & 0xFFFF));
+ }
+ }
+
+ for (i = 0; i < 2; i++) {
+ tmp = buffer[5 + i];
+ real_part = (tmp >> 8) & 0xFF;
+ imag_part = (tmp & 0xFF);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (((i + 26) << 10) | 448));
+
+ if (dev->phy.rev >= 3) {
+ cur_real = real_part;
+ cur_imag = imag_part;
+ tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF);
+ }
+
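+ /* rev 3+ uses the single value computed above for all 128 entries;
+ * rev < 3 rescales each entry by loscale[]. */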
+ for (j = 0; j < 128; j++) {
+ if (dev->phy.rev < 3) {
+ cur_real = (real_part * loscale[j] + 128) >> 8;
+ cur_imag = (imag_part * loscale[j] + 128) >> 8;
+ tmp = ((cur_real & 0xFF) << 8) |
+ (cur_imag & 0xFF);
+ }
+ b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
+ ((tmp >> 16) & 0xFFFF));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (tmp & 0xFFFF));
+ }
+ }
+
+ if (dev->phy.rev >= 3) {
+ b43_shm_write16(dev, B43_SHM_SHARED,
+ B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF);
+ b43_shm_write16(dev, B43_SHM_SHARED,
+ B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF);
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */
+static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
+ u8 *events, u8 *delays, u8 length)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i;
+ u8 end = (dev->phy.rev >= 3) ? 0x1F : 0x0F;
+ u16 offset1 = cmd << 4;
+ u16 offset2 = offset1 + 0x80;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+
+ b43_ntab_write_bulk(dev, B43_NTAB8(7, offset1), length, events);
+ b43_ntab_write_bulk(dev, B43_NTAB8(7, offset2), length, delays);
+
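+ /* Pad the remaining slots with the end-of-sequence marker and a
+ * one-unit delay. */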
+ for (i = length; i < 16; i++) {
+ b43_ntab_write(dev, B43_NTAB8(7, offset1 + i), end);
+ b43_ntab_write(dev, B43_NTAB8(7, offset2 + i), 1);
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */
static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
enum b43_nphy_rf_sequence seq)
{
@@ -376,6 +1171,7 @@ static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
[B43_RFSEQ_UPDATE_GAINU] = B43_NPHY_RFSEQTR_UPGU,
};
int i;
+ u16 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE);
B43_WARN_ON(seq >= ARRAY_SIZE(trigger));
@@ -389,8 +1185,83 @@ static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
}
b43err(dev->wl, "RF sequence status timeout\n");
ok:
- b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
- ~(B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER));
+ b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
+static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
+ u16 value, u8 core, bool off)
+{
+ int i;
+ u8 index = fls(field);
+ u16 addr, en_addr, val_addr; /* B43_PHY_N() addresses exceed 8 bits */
+ /* we expect only one bit set */
+ B43_WARN_ON(field & (~(1 << (index - 1))));
+
+ if (dev->phy.rev >= 3) {
+ const struct nphy_rf_control_override_rev3 *rf_ctrl;
+ for (i = 0; i < 2; i++) {
+ if (index == 0 || index == 16) {
+ b43err(dev->wl,
+ "Unsupported RF Ctrl Override call\n");
+ return;
+ }
+
+ rf_ctrl = &tbl_rf_control_override_rev3[index - 1];
+ en_addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->en_addr0 : rf_ctrl->en_addr1);
+ val_addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->val_addr0 : rf_ctrl->val_addr1);
+
+ if (off) {
+ b43_phy_mask(dev, en_addr, ~(field));
+ b43_phy_mask(dev, val_addr,
+ ~(rf_ctrl->val_mask));
+ } else {
+ if (core == 0 || ((1 << core) & i) != 0) {
+ b43_phy_set(dev, en_addr, field);
+ b43_phy_maskset(dev, val_addr,
+ ~(rf_ctrl->val_mask),
+ (value << rf_ctrl->val_shift));
+ }
+ }
+ }
+ } else {
+ const struct nphy_rf_control_override_rev2 *rf_ctrl;
+ if (off) {
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(field));
+ value = 0;
+ } else {
+ b43_phy_set(dev, B43_NPHY_RFCTL_OVER, field);
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (index <= 1 || index == 16) {
+ b43err(dev->wl,
+ "Unsupported RF Ctrl Override call\n");
+ return;
+ }
+
+ if (index == 2 || index == 10 ||
+ (index >= 13 && index <= 15)) {
+ core = 1;
+ }
+
+ rf_ctrl = &tbl_rf_control_override_rev2[index - 2];
+ addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->addr0 : rf_ctrl->addr1);
+
+ if ((core & (1 << i)) != 0)
+ b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask),
+ (value << rf_ctrl->shift));
+
+ b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_START);
+ udelay(1);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE);
+ }
+ }
}
static void b43_nphy_bphy_init(struct b43_wldev *dev)
@@ -411,81 +1282,1597 @@ static void b43_nphy_bphy_init(struct b43_wldev *dev)
b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
}
-/* RSSI Calibration */
-static void b43_nphy_rssi_cal(struct b43_wldev *dev, u8 type)
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */
+static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
+ s8 offset, u8 core, u8 rail, u8 type)
{
- //TODO
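+ /* core selects which RX core(s) to program: 1 = first core,
+ * 2 = second core, 5 = both. */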
+ u16 tmp;
+ bool core1or5 = (core == 1) || (core == 5);
+ bool core2or5 = (core == 2) || (core == 5);
+
+ offset = clamp_val(offset, -32, 31);
+ tmp = ((scale & 0x3F) << 8) | (offset & 0x3F);
+
+ if (core1or5 && (rail == 0) && (type == 2))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, tmp);
+ if (core1or5 && (rail == 1) && (type == 2))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, tmp);
+ if (core2or5 && (rail == 0) && (type == 2))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, tmp);
+ if (core2or5 && (rail == 1) && (type == 2))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, tmp);
+ if (core1or5 && (rail == 0) && (type == 0))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, tmp);
+ if (core1or5 && (rail == 1) && (type == 0))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, tmp);
+ if (core2or5 && (rail == 0) && (type == 0))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, tmp);
+ if (core2or5 && (rail == 1) && (type == 0))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, tmp);
+ if (core1or5 && (rail == 0) && (type == 1))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, tmp);
+ if (core1or5 && (rail == 1) && (type == 1))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, tmp);
+ if (core2or5 && (rail == 0) && (type == 1))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, tmp);
+ if (core2or5 && (rail == 1) && (type == 1))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, tmp);
+ if (core1or5 && (rail == 0) && (type == 6))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TBD, tmp);
+ if (core1or5 && (rail == 1) && (type == 6))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TBD, tmp);
+ if (core2or5 && (rail == 0) && (type == 6))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TBD, tmp);
+ if (core2or5 && (rail == 1) && (type == 6))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TBD, tmp);
+ if (core1or5 && (rail == 0) && (type == 3))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_PWRDET, tmp);
+ if (core1or5 && (rail == 1) && (type == 3))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_PWRDET, tmp);
+ if (core2or5 && (rail == 0) && (type == 3))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_PWRDET, tmp);
+ if (core2or5 && (rail == 1) && (type == 3))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_PWRDET, tmp);
+ if (core1or5 && (type == 4))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TSSI, tmp);
+ if (core2or5 && (type == 4))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TSSI, tmp);
+ if (core1or5 && (type == 5))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TSSI, tmp);
+ if (core2or5 && (type == 5))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp);
+}
+
+static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
+{
+ u16 val;
+
+ if (type < 3)
+ val = 0;
+ else if (type == 6)
+ val = 1;
+ else if (type == 3)
+ val = 2;
+ else
+ val = 3;
+
+ val = (val << 12) | (val << 14);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val);
+
+ if (type < 3) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF,
+ (type + 1) << 4);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF,
+ (type + 1) << 4);
+ }
+
+ /* TODO use some definitions */
+ if (code == 0) {
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_OVER, 0xCFFF, 0);
+ if (type < 3) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFEC7, 0);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xEFDC, 0);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFFFE, 0);
+ udelay(20);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xFFFE, 0);
+ }
+ } else {
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_OVER, 0xCFFF,
+ 0x3000);
+ if (type < 3) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
+ 0xFEC7, 0x0180);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
+ 0xEFDC, (code << 1 | 0x1021));
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFFFE, 0x1);
+ udelay(20);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xFFFE, 0);
+ }
+ }
+}
+
+static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i;
+ u16 reg, val;
+
+ if (code == 0) {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, 0xFDFF);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, 0xFDFF);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C1, 0xFCFF);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C2, 0xFCFF);
+ b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S0, 0xFFDF);
+ b43_phy_mask(dev, B43_NPHY_TXF_40CO_B32S1, 0xFFDF);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0xFFC3);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0xFFC3);
+ } else {
+ for (i = 0; i < 2; i++) {
+ if ((code == 1 && i == 1) || (code == 2 && !i))
+ continue;
+
+ reg = (i == 0) ?
+ B43_NPHY_AFECTL_OVER1 : B43_NPHY_AFECTL_OVER;
+ b43_phy_maskset(dev, reg, 0xFDFF, 0x0200);
+
+ if (type < 3) {
+ reg = (i == 0) ?
+ B43_NPHY_AFECTL_C1 :
+ B43_NPHY_AFECTL_C2;
+ b43_phy_maskset(dev, reg, 0xFCFF, 0);
+
+ reg = (i == 0) ?
+ B43_NPHY_RFCTL_LUT_TRSW_UP1 :
+ B43_NPHY_RFCTL_LUT_TRSW_UP2;
+ b43_phy_maskset(dev, reg, 0xFFC3, 0);
+
+ if (type == 0)
+ val = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 4 : 8;
+ else if (type == 1)
+ val = 16;
+ else
+ val = 32;
+ b43_phy_set(dev, reg, val);
+
+ reg = (i == 0) ?
+ B43_NPHY_TXF_40CO_B1S0 :
+ B43_NPHY_TXF_40CO_B32S1;
+ b43_phy_set(dev, reg, 0x0020);
+ } else {
+ if (type == 6)
+ val = 0x0100;
+ else if (type == 3)
+ val = 0x0200;
+ else
+ val = 0x0300;
+
+ reg = (i == 0) ?
+ B43_NPHY_AFECTL_C1 :
+ B43_NPHY_AFECTL_C2;
+
+ b43_phy_maskset(dev, reg, 0xFCFF, val);
+ b43_phy_maskset(dev, reg, 0xF3FF, val << 2);
+
+ if (type != 3 && type != 6) {
+ enum ieee80211_band band =
+ b43_current_band(dev->wl);
+
+ if ((nphy->ipa2g_on &&
+ band == IEEE80211_BAND_2GHZ) ||
+ (nphy->ipa5g_on &&
+ band == IEEE80211_BAND_5GHZ))
+ val = (band == IEEE80211_BAND_5GHZ) ? 0xC : 0xE;
+ else
+ val = 0x11;
+ reg = (i == 0) ? 0x2000 : 0x3000;
+ reg |= B2055_PADDRV;
+ b43_radio_write16(dev, reg, val);
+
+ reg = (i == 0) ?
+ B43_NPHY_AFECTL_OVER1 :
+ B43_NPHY_AFECTL_OVER;
+ b43_phy_set(dev, reg, 0x0200);
+ }
+ }
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */
+static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
+{
+ if (dev->phy.rev >= 3)
+ b43_nphy_rev3_rssi_select(dev, code, type);
+ else
+ b43_nphy_rev2_rssi_select(dev, code, type);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */
+static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev, u8 type, u8 *buf)
+{
+ int i;
+ for (i = 0; i < 2; i++) {
+ if (type == 2) {
+ if (i == 0) {
+ b43_radio_maskset(dev, B2055_C1_B0NB_RSSIVCM,
+ 0xFC, buf[0]);
+ b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5,
+ 0xFC, buf[1]);
+ } else {
+ b43_radio_maskset(dev, B2055_C2_B0NB_RSSIVCM,
+ 0xFC, buf[2 * i]);
+ b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5,
+ 0xFC, buf[2 * i + 1]);
+ }
+ } else {
+ if (i == 0)
+ b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5,
+ 0xF3, buf[0] << 2);
+ else
+ b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5,
+ 0xF3, buf[2 * i + 1] << 2);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */
+static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
+ u8 nsamp)
+{
+ int i;
+ int out;
+ u16 save_regs_phy[9];
+ u16 s[2];
+
+ if (dev->phy.rev >= 3) {
+ save_regs_phy[0] = b43_phy_read(dev,
+ B43_NPHY_RFCTL_LUT_TRSW_UP1);
+ save_regs_phy[1] = b43_phy_read(dev,
+ B43_NPHY_RFCTL_LUT_TRSW_UP2);
+ save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
+ save_regs_phy[3] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
+ save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
+ save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0);
+ save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1);
+ }
+
+ b43_nphy_rssi_select(dev, 5, type);
+
+ if (dev->phy.rev < 2) {
+ save_regs_phy[8] = b43_phy_read(dev, B43_NPHY_GPIO_SEL);
+ b43_phy_write(dev, B43_NPHY_GPIO_SEL, 5);
+ }
+
+ for (i = 0; i < 4; i++)
+ buf[i] = 0;
+
+ for (i = 0; i < nsamp; i++) {
+ if (dev->phy.rev < 2) {
+ s[0] = b43_phy_read(dev, B43_NPHY_GPIO_LOOUT);
+ s[1] = b43_phy_read(dev, B43_NPHY_GPIO_HIOUT);
+ } else {
+ s[0] = b43_phy_read(dev, B43_NPHY_RSSI1);
+ s[1] = b43_phy_read(dev, B43_NPHY_RSSI2);
+ }
+
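+ /* Each reading is a signed 6-bit field; shifting it up into an s8
+ * and back down sign-extends it before accumulation. */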
+ buf[0] += ((s8)((s[0] & 0x3F) << 2)) >> 2;
+ buf[1] += ((s8)(((s[0] >> 8) & 0x3F) << 2)) >> 2;
+ buf[2] += ((s8)((s[1] & 0x3F) << 2)) >> 2;
+ buf[3] += ((s8)(((s[1] >> 8) & 0x3F) << 2)) >> 2;
+ }
+ out = (buf[0] & 0xFF) << 24 | (buf[1] & 0xFF) << 16 |
+ (buf[2] & 0xFF) << 8 | (buf[3] & 0xFF);
+
+ if (dev->phy.rev < 2)
+ b43_phy_write(dev, B43_NPHY_GPIO_SEL, save_regs_phy[8]);
+
+ if (dev->phy.rev >= 3) {
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1,
+ save_regs_phy[0]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2,
+ save_regs_phy[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[2]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[3]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, save_regs_phy[4]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]);
+ }
+
+ return out;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */
+static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
+{
+ int i, j;
+ u8 state[4];
+ u8 code, val;
+ u16 class, override;
+ u8 regs_save_radio[2];
+ u16 regs_save_phy[2];
+ s8 offset[4];
+
+ u16 clip_state[2];
+ u16 clip_off[2] = { 0xFFFF, 0xFFFF };
+ s32 results_min[4] = { };
+ u8 vcm_final[4] = { };
+ s32 results[4][4] = { };
+ s32 miniq[4][2] = { };
+
+ if (type == 2) {
+ code = 0;
+ val = 6;
+ } else if (type < 2) {
+ code = 25;
+ val = 4;
+ } else {
+ B43_WARN_ON(1);
+ return;
+ }
+
+ class = b43_nphy_classifier(dev, 0, 0);
+ b43_nphy_classifier(dev, 7, 4);
+ b43_nphy_read_clip_detection(dev, clip_state);
+ b43_nphy_write_clip_detection(dev, clip_off);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ override = 0x140;
+ else
+ override = 0x110;
+
+ regs_save_phy[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+ regs_save_radio[0] = b43_radio_read16(dev, B2055_C1_PD_RXTX);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, override);
+ b43_radio_write16(dev, B2055_C1_PD_RXTX, val);
+
+ regs_save_phy[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+ regs_save_radio[1] = b43_radio_read16(dev, B2055_C2_PD_RXTX);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, override);
+ b43_radio_write16(dev, B2055_C2_PD_RXTX, val);
+
+ state[0] = b43_radio_read16(dev, B2055_C1_PD_RSSIMISC) & 0x07;
+ state[1] = b43_radio_read16(dev, B2055_C2_PD_RSSIMISC) & 0x07;
+ b43_radio_mask(dev, B2055_C1_PD_RSSIMISC, 0xF8);
+ b43_radio_mask(dev, B2055_C2_PD_RSSIMISC, 0xF8);
+ state[2] = b43_radio_read16(dev, B2055_C1_SP_RSSI) & 0x07;
+ state[3] = b43_radio_read16(dev, B2055_C2_SP_RSSI) & 0x07;
+
+ b43_nphy_rssi_select(dev, 5, type);
+ b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 0, type);
+ b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 1, type);
+
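+ /* Sweep the four VCM codes, poll RSSI for each, then per rail pick
+ * the code whose reading is closest to the target (code * 8). */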
+ for (i = 0; i < 4; i++) {
+ u8 tmp[4];
+ for (j = 0; j < 4; j++)
+ tmp[j] = i;
+ if (type != 1)
+ b43_nphy_set_rssi_2055_vcm(dev, type, tmp);
+ b43_nphy_poll_rssi(dev, type, results[i], 8);
+ if (type < 2)
+ for (j = 0; j < 2; j++)
+ miniq[i][j] = min(results[i][2 * j],
+ results[i][2 * j + 1]);
+ }
+
+ for (i = 0; i < 4; i++) {
+ s32 mind = 40;
+ u8 minvcm = 0;
+ s32 minpoll = 249;
+ s32 curr;
+ for (j = 0; j < 4; j++) {
+ if (type == 2)
+ curr = abs(results[j][i]);
+ else
+ curr = abs(miniq[j][i / 2] - code * 8);
+
+ if (curr < mind) {
+ mind = curr;
+ minvcm = j;
+ }
+
+ if (results[j][i] < minpoll)
+ minpoll = results[j][i];
+ }
+ results_min[i] = minpoll;
+ vcm_final[i] = minvcm;
+ }
+
+ if (type != 1)
+ b43_nphy_set_rssi_2055_vcm(dev, type, vcm_final);
+
+ for (i = 0; i < 4; i++) {
+ offset[i] = (code * 8) - results[vcm_final[i]][i];
+
+ if (offset[i] < 0)
+ offset[i] = -((abs(offset[i]) + 4) / 8);
+ else
+ offset[i] = (offset[i] + 4) / 8;
+
+ if (results_min[i] == 248)
+ offset[i] = code - 32;
+
+ if (i % 2 == 0)
+ b43_nphy_scale_offset_rssi(dev, 0, offset[i], 1, 0,
+ type);
+ else
+ b43_nphy_scale_offset_rssi(dev, 0, offset[i], 2, 1,
+ type);
+ }
+
+ b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[0]);
+ b43_radio_maskset(dev, B2055_C2_PD_RSSIMISC, 0xF8, state[1]);
+
+ switch (state[2]) {
+ case 1:
+ b43_nphy_rssi_select(dev, 1, 2);
+ break;
+ case 4:
+ b43_nphy_rssi_select(dev, 1, 0);
+ break;
+ case 2:
+ b43_nphy_rssi_select(dev, 1, 1);
+ break;
+ default:
+ b43_nphy_rssi_select(dev, 1, 1);
+ break;
+ }
+
+ switch (state[3]) {
+ case 1:
+ b43_nphy_rssi_select(dev, 2, 2);
+ break;
+ case 4:
+ b43_nphy_rssi_select(dev, 2, 0);
+ break;
+ default:
+ b43_nphy_rssi_select(dev, 2, 1);
+ break;
+ }
+
+ b43_nphy_rssi_select(dev, 0, type);
+
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs_save_phy[0]);
+ b43_radio_write16(dev, B2055_C1_PD_RXTX, regs_save_radio[0]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs_save_phy[1]);
+ b43_radio_write16(dev, B2055_C2_PD_RXTX, regs_save_radio[1]);
+
+ b43_nphy_classifier(dev, 7, class);
+ b43_nphy_write_clip_detection(dev, clip_state);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */
+static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
+{
+ /* TODO */
+}
+
+/*
+ * RSSI Calibration
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal
+ */
+static void b43_nphy_rssi_cal(struct b43_wldev *dev)
+{
+ if (dev->phy.rev >= 3) {
+ b43_nphy_rev3_rssi_cal(dev);
+ } else {
+ b43_nphy_rev2_rssi_cal(dev, 2);
+ b43_nphy_rev2_rssi_cal(dev, 0);
+ b43_nphy_rev2_rssi_cal(dev, 1);
+ }
+}
+
+/*
+ * Restore RSSI Calibration
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal
+ */
+static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u16 *rssical_radio_regs = NULL;
+ u16 *rssical_phy_regs = NULL;
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (!nphy->rssical_chanspec_2G)
+ return;
+ rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
+ rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
+ } else {
+ if (!nphy->rssical_chanspec_5G)
+ return;
+ rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G;
+ rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
+ }
+
+ /* TODO use some definitions */
+ b43_radio_maskset(dev, 0x602B, 0xE3, rssical_radio_regs[0]);
+ b43_radio_maskset(dev, 0x702B, 0xE3, rssical_radio_regs[1]);
+
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, rssical_phy_regs[0]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, rssical_phy_regs[1]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, rssical_phy_regs[2]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, rssical_phy_regs[3]);
+
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, rssical_phy_regs[4]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, rssical_phy_regs[5]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, rssical_phy_regs[6]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, rssical_phy_regs[7]);
+
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, rssical_phy_regs[8]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, rssical_phy_regs[9]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, rssical_phy_regs[10]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, rssical_phy_regs[11]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
+static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
+{
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (dev->phy.rev >= 6) {
+ /* TODO If the chip is 47162
+ return txpwrctrl_tx_gain_ipa_rev5 */
+ return txpwrctrl_tx_gain_ipa_rev6;
+ } else if (dev->phy.rev >= 5) {
+ return txpwrctrl_tx_gain_ipa_rev5;
+ } else {
+ return txpwrctrl_tx_gain_ipa;
+ }
+ } else {
+ return txpwrctrl_tx_gain_ipa_5g;
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */
+static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u16 *save = nphy->tx_rx_cal_radio_saveregs;
+ u16 tmp;
+ u8 offset, i;
+
+ if (dev->phy.rev >= 3) {
+ for (i = 0; i < 2; i++) {
+ tmp = (i == 0) ? 0x2000 : 0x3000;
+ offset = i * 11;
+
+ save[offset + 0] = b43_radio_read16(dev, B2055_CAL_RVARCTL);
+ save[offset + 1] = b43_radio_read16(dev, B2055_CAL_LPOCTL);
+ save[offset + 2] = b43_radio_read16(dev, B2055_CAL_TS);
+ save[offset + 3] = b43_radio_read16(dev, B2055_CAL_RCCALRTS);
+ save[offset + 4] = b43_radio_read16(dev, B2055_CAL_RCALRTS);
+ save[offset + 5] = b43_radio_read16(dev, B2055_PADDRV);
+ save[offset + 6] = b43_radio_read16(dev, B2055_XOCTL1);
+ save[offset + 7] = b43_radio_read16(dev, B2055_XOCTL2);
+ save[offset + 8] = b43_radio_read16(dev, B2055_XOREGUL);
+ save[offset + 9] = b43_radio_read16(dev, B2055_XOMISC);
+ save[offset + 10] = b43_radio_read16(dev, B2055_PLL_LFC1);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_radio_write16(dev, tmp | B2055_CAL_RVARCTL, 0x0A);
+ b43_radio_write16(dev, tmp | B2055_CAL_LPOCTL, 0x40);
+ b43_radio_write16(dev, tmp | B2055_CAL_TS, 0x55);
+ b43_radio_write16(dev, tmp | B2055_CAL_RCCALRTS, 0);
+ b43_radio_write16(dev, tmp | B2055_CAL_RCALRTS, 0);
+ if (nphy->ipa5g_on) {
+ b43_radio_write16(dev, tmp | B2055_PADDRV, 4);
+ b43_radio_write16(dev, tmp | B2055_XOCTL1, 1);
+ } else {
+ b43_radio_write16(dev, tmp | B2055_PADDRV, 0);
+ b43_radio_write16(dev, tmp | B2055_XOCTL1, 0x2F);
+ }
+ b43_radio_write16(dev, tmp | B2055_XOCTL2, 0);
+ } else {
+ b43_radio_write16(dev, tmp | B2055_CAL_RVARCTL, 0x06);
+ b43_radio_write16(dev, tmp | B2055_CAL_LPOCTL, 0x40);
+ b43_radio_write16(dev, tmp | B2055_CAL_TS, 0x55);
+ b43_radio_write16(dev, tmp | B2055_CAL_RCCALRTS, 0);
+ b43_radio_write16(dev, tmp | B2055_CAL_RCALRTS, 0);
+ b43_radio_write16(dev, tmp | B2055_XOCTL1, 0);
+ if (nphy->ipa2g_on) {
+ b43_radio_write16(dev, tmp | B2055_PADDRV, 6);
+ b43_radio_write16(dev, tmp | B2055_XOCTL2,
+ (dev->phy.rev < 5) ? 0x11 : 0x01);
+ } else {
+ b43_radio_write16(dev, tmp | B2055_PADDRV, 0);
+ b43_radio_write16(dev, tmp | B2055_XOCTL2, 0);
+ }
+ }
+ b43_radio_write16(dev, tmp | B2055_XOREGUL, 0);
+ b43_radio_write16(dev, tmp | B2055_XOMISC, 0);
+ b43_radio_write16(dev, tmp | B2055_PLL_LFC1, 0);
+ }
+ } else {
+ save[0] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL1);
+ b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL1, 0x29);
+
+ save[1] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL2);
+ b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL2, 0x54);
+
+ save[2] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL1);
+ b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL1, 0x29);
+
+ save[3] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL2);
+ b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL2, 0x54);
+
+ save[3] = b43_radio_read16(dev, B2055_C1_PWRDET_RXTX);
+ save[4] = b43_radio_read16(dev, B2055_C2_PWRDET_RXTX);
+
+ if (!(b43_phy_read(dev, B43_NPHY_BANDCTL) &
+ B43_NPHY_BANDCTL_5GHZ)) {
+ b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x04);
+ b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x04);
+ } else {
+ b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x20);
+ b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x20);
+ }
+
+ if (dev->phy.rev < 2) {
+ b43_radio_set(dev, B2055_C1_TX_BB_MXGM, 0x20);
+ b43_radio_set(dev, B2055_C2_TX_BB_MXGM, 0x20);
+ } else {
+ b43_radio_mask(dev, B2055_C1_TX_BB_MXGM, ~0x20);
+ b43_radio_mask(dev, B2055_C2_TX_BB_MXGM, ~0x20);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */
+static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
+ struct nphy_txgains target,
+ struct nphy_iqcal_params *params)
+{
+ int i, j, indx;
+ u16 gain;
+
+ if (dev->phy.rev >= 3) {
+ params->txgm = target.txgm[core];
+ params->pga = target.pga[core];
+ params->pad = target.pad[core];
+ params->ipa = target.ipa[core];
+ params->cal_gain = (params->txgm << 12) | (params->pga << 8) |
+ (params->pad << 4) | (params->ipa);
+ for (j = 0; j < 5; j++)
+ params->ncorr[j] = 0x79;
+ } else {
+ gain = (target.pad[core]) | (target.pga[core] << 4) |
+ (target.txgm[core] << 8);
+
+ indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
+ 1 : 0;
+ for (i = 0; i < 9; i++)
+ if (tbl_iqcal_gainparams[indx][i][0] == gain)
+ break;
+ i = min(i, 8);
+
+ params->txgm = tbl_iqcal_gainparams[indx][i][1];
+ params->pga = tbl_iqcal_gainparams[indx][i][2];
+ params->pad = tbl_iqcal_gainparams[indx][i][3];
+ params->cal_gain = (params->txgm << 7) | (params->pga << 4) |
+ (params->pad << 2);
+ for (j = 0; j < 4; j++)
+ params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j];
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */
+static void b43_nphy_update_tx_cal_ladder(struct b43_wldev *dev, u16 core)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i;
+ u16 scale, entry;
+
+ u16 tmp = nphy->txcal_bbmult;
+ if (core == 0)
+ tmp >>= 8;
+ tmp &= 0xff;
+
+ for (i = 0; i < 18; i++) {
+ scale = (ladder_lo[i].percent * tmp) / 100;
+ entry = ((scale & 0xFF) << 8) | ladder_lo[i].g_env;
+ b43_ntab_write(dev, B43_NTAB16(15, i), entry);
+
+ scale = (ladder_iq[i].percent * tmp) / 100;
+ entry = ((scale & 0xFF) << 8) | ladder_iq[i].g_env;
+ b43_ntab_write(dev, B43_NTAB16(15, i + 32), entry);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ExtPaSetTxDigiFilts */
+static void b43_nphy_ext_pa_set_tx_dig_filters(struct b43_wldev *dev)
+{
+ int i;
+ for (i = 0; i < 15; i++)
+ b43_phy_write(dev, B43_PHY_N(0x2C5 + i),
+ tbl_tx_filter_coef_rev4[2][i]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IpaSetTxDigiFilts */
+static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
+{
+ int i, j;
+ /* B43_NPHY_TXF_20CO_S0A1, B43_NPHY_TXF_40CO_S0A1, unknown */
+ u16 offset[] = { 0x186, 0x195, 0x2C5 };
+
+ for (i = 0; i < 3; i++)
+ for (j = 0; j < 15; j++)
+ b43_phy_write(dev, B43_PHY_N(offset[i] + j),
+ tbl_tx_filter_coef_rev4[i][j]);
+
+ if (dev->phy.is_40mhz) {
+ for (j = 0; j < 15; j++)
+ b43_phy_write(dev, B43_PHY_N(offset[0] + j),
+ tbl_tx_filter_coef_rev4[3][j]);
+ } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ for (j = 0; j < 15; j++)
+ b43_phy_write(dev, B43_PHY_N(offset[0] + j),
+ tbl_tx_filter_coef_rev4[5][j]);
+ }
+
+ if (dev->phy.channel == 14)
+ for (j = 0; j < 15; j++)
+ b43_phy_write(dev, B43_PHY_N(offset[0] + j),
+ tbl_tx_filter_coef_rev4[6][j]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetTxGain */
+static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u16 curr_gain[2];
+ struct nphy_txgains target;
+ const u32 *table = NULL;
+
+ if (nphy->txpwrctrl == 0) {
+ int i;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+ b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, curr_gain);
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+
+ for (i = 0; i < 2; ++i) {
+ if (dev->phy.rev >= 3) {
+ target.ipa[i] = curr_gain[i] & 0x000F;
+ target.pad[i] = (curr_gain[i] & 0x00F0) >> 4;
+ target.pga[i] = (curr_gain[i] & 0x0F00) >> 8;
+ target.txgm[i] = (curr_gain[i] & 0x7000) >> 12;
+ } else {
+ target.ipa[i] = curr_gain[i] & 0x0003;
+ target.pad[i] = (curr_gain[i] & 0x000C) >> 2;
+ target.pga[i] = (curr_gain[i] & 0x0070) >> 4;
+ target.txgm[i] = (curr_gain[i] & 0x0380) >> 7;
+ }
+ }
+ } else {
+ int i;
+ u16 index[2];
+ index[0] = (b43_phy_read(dev, B43_NPHY_C1_TXPCTL_STAT) &
+ B43_NPHY_TXPCTL_STAT_BIDX) >>
+ B43_NPHY_TXPCTL_STAT_BIDX_SHIFT;
+ index[1] = (b43_phy_read(dev, B43_NPHY_C2_TXPCTL_STAT) &
+ B43_NPHY_TXPCTL_STAT_BIDX) >>
+ B43_NPHY_TXPCTL_STAT_BIDX_SHIFT;
+
+ for (i = 0; i < 2; ++i) {
+ if (dev->phy.rev >= 3) {
+ enum ieee80211_band band =
+ b43_current_band(dev->wl);
+
+ if ((nphy->ipa2g_on &&
+ band == IEEE80211_BAND_2GHZ) ||
+ (nphy->ipa5g_on &&
+ band == IEEE80211_BAND_5GHZ)) {
+ table = b43_nphy_get_ipa_gain_table(dev);
+ } else {
+ if (band == IEEE80211_BAND_5GHZ) {
+ if (dev->phy.rev == 3)
+ table = b43_ntab_tx_gain_rev3_5ghz;
+ else if (dev->phy.rev == 4)
+ table = b43_ntab_tx_gain_rev4_5ghz;
+ else
+ table = b43_ntab_tx_gain_rev5plus_5ghz;
+ } else {
+ table = b43_ntab_tx_gain_rev3plus_2ghz;
+ }
+ }
+
+ target.ipa[i] = (table[index[i]] >> 16) & 0xF;
+ target.pad[i] = (table[index[i]] >> 20) & 0xF;
+ target.pga[i] = (table[index[i]] >> 24) & 0xF;
+ target.txgm[i] = (table[index[i]] >> 28) & 0xF;
+ } else {
+ table = b43_ntab_tx_gain_rev0_1_2;
+
+ target.ipa[i] = (table[index[i]] >> 16) & 0x3;
+ target.pad[i] = (table[index[i]] >> 18) & 0x3;
+ target.pga[i] = (table[index[i]] >> 20) & 0x7;
+ target.txgm[i] = (table[index[i]] >> 23) & 0x7;
+ }
+ }
+ }
+
+ return target;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhyCleanup */
+static void b43_nphy_tx_cal_phy_cleanup(struct b43_wldev *dev)
+{
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+
+ if (dev->phy.rev >= 3) {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[0]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[3]);
+ b43_phy_write(dev, B43_NPHY_BBCFG, regs[4]);
+ b43_ntab_write(dev, B43_NTAB16(8, 3), regs[5]);
+ b43_ntab_write(dev, B43_NTAB16(8, 19), regs[6]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[7]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[8]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
+ b43_nphy_reset_cca(dev);
+ } else {
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, regs[0]);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
+ b43_ntab_write(dev, B43_NTAB16(8, 2), regs[3]);
+ b43_ntab_write(dev, B43_NTAB16(8, 18), regs[4]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[5]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[6]);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhySetup */
+static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
+{
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+ u16 tmp;
+
+ regs[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
+ regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
+ if (dev->phy.rev >= 3) {
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0xF0FF, 0x0A00);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0xF0FF, 0x0A00);
+
+ tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
+ regs[2] = tmp;
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, tmp | 0x0600);
+
+ tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ regs[3] = tmp;
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp | 0x0600);
+
+ regs[4] = b43_phy_read(dev, B43_NPHY_BBCFG);
+ b43_phy_mask(dev, B43_NPHY_BBCFG, (u16)~B43_NPHY_BBCFG_RSTRX);
+
+ tmp = b43_ntab_read(dev, B43_NTAB16(8, 3));
+ regs[5] = tmp;
+ b43_ntab_write(dev, B43_NTAB16(8, 3), 0);
+
+ tmp = b43_ntab_read(dev, B43_NTAB16(8, 19));
+ regs[6] = tmp;
+ b43_ntab_write(dev, B43_NTAB16(8, 19), 0);
+ regs[7] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+ regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+
+ /* TODO: Call N PHY RF Ctrl Intc Override with 2, 1, 3 */
+ /* TODO: Call N PHY RF Ctrl Intc Override with 1, 2, 1 */
+ /* TODO: Call N PHY RF Ctrl Intc Override with 1, 8, 2 */
+
+ regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
+ regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
+ } else {
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, 0xA000);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, 0xA000);
+ tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ regs[2] = tmp;
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp | 0x3000);
+ tmp = b43_ntab_read(dev, B43_NTAB16(8, 2));
+ regs[3] = tmp;
+ tmp |= 0x2000;
+ b43_ntab_write(dev, B43_NTAB16(8, 2), tmp);
+ tmp = b43_ntab_read(dev, B43_NTAB16(8, 18));
+ regs[4] = tmp;
+ tmp |= 0x2000;
+ b43_ntab_write(dev, B43_NTAB16(8, 18), tmp);
+ regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+ regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ tmp = 0x0180;
+ else
+ tmp = 0x0120;
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreCal */
+static void b43_nphy_restore_cal(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u16 coef[4];
+ u16 *loft = NULL;
+ u16 *table = NULL;
+
+ int i;
+ u16 *txcal_radio_regs = NULL;
+ struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (nphy->iqcal_chanspec_2G == 0)
+ return;
+ table = nphy->cal_cache.txcal_coeffs_2G;
+ loft = &nphy->cal_cache.txcal_coeffs_2G[5];
+ } else {
+ if (nphy->iqcal_chanspec_5G == 0)
+ return;
+ table = nphy->cal_cache.txcal_coeffs_5G;
+ loft = &nphy->cal_cache.txcal_coeffs_5G[5];
+ }
+
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 4, table);
+
+ for (i = 0; i < 4; i++) {
+ if (dev->phy.rev >= 3)
+ table[i] = coef[i];
+ else
+ coef[i] = 0;
+ }
+
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4, coef);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2, loft);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2, loft);
+
+ if (dev->phy.rev < 2)
+ b43_nphy_tx_iq_workaround(dev);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
+ rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
+ } else {
+ txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_5G;
+ rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_5G;
+ }
+
+ /* TODO use some definitions */
+ if (dev->phy.rev >= 3) {
+ b43_radio_write(dev, 0x2021, txcal_radio_regs[0]);
+ b43_radio_write(dev, 0x2022, txcal_radio_regs[1]);
+ b43_radio_write(dev, 0x3021, txcal_radio_regs[2]);
+ b43_radio_write(dev, 0x3022, txcal_radio_regs[3]);
+ b43_radio_write(dev, 0x2023, txcal_radio_regs[4]);
+ b43_radio_write(dev, 0x2024, txcal_radio_regs[5]);
+ b43_radio_write(dev, 0x3023, txcal_radio_regs[6]);
+ b43_radio_write(dev, 0x3024, txcal_radio_regs[7]);
+ } else {
+ b43_radio_write(dev, 0x8B, txcal_radio_regs[0]);
+ b43_radio_write(dev, 0xBA, txcal_radio_regs[1]);
+ b43_radio_write(dev, 0x8D, txcal_radio_regs[2]);
+ b43_radio_write(dev, 0xBC, txcal_radio_regs[3]);
+ }
+ b43_nphy_rx_iq_coeffs(dev, true, rxcal_coeffs);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalTxIqlo */
+static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
+ struct nphy_txgains target,
+ bool full, bool mphase)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i;
+ int error = 0;
+ int freq;
+ bool avoid = false;
+ u8 length;
+ u16 tmp, core, type, count, max, numb, last, cmd;
+ const u16 *table;
+ bool phy6or5x;
+
+ u16 buffer[11];
+ u16 diq_start = 0;
+ u16 save[2];
+ u16 gain[2];
+ struct nphy_iqcal_params params[2];
+ bool updated[2] = { };
+
+ b43_nphy_stay_in_carrier_search(dev, true);
+
+ if (dev->phy.rev >= 4) {
+ avoid = nphy->hang_avoid;
+ nphy->hang_avoid = 0;
+ }
+
+ b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, save);
+
+ for (i = 0; i < 2; i++) {
+ b43_nphy_iq_cal_gain_params(dev, i, target, &params[i]);
+ gain[i] = params[i].cal_gain;
+ }
+
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, gain);
+
+ b43_nphy_tx_cal_radio_setup(dev);
+ b43_nphy_tx_cal_phy_setup(dev);
+
+ phy6or5x = dev->phy.rev >= 6 ||
+ (dev->phy.rev == 5 && nphy->ipa2g_on &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ);
+ if (phy6or5x) {
+ if (dev->phy.is_40mhz) {
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18,
+ tbl_tx_iqlo_cal_loft_ladder_40);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 32), 18,
+ tbl_tx_iqlo_cal_iqimb_ladder_40);
+ } else {
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18,
+ tbl_tx_iqlo_cal_loft_ladder_20);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 32), 18,
+ tbl_tx_iqlo_cal_iqimb_ladder_20);
+ }
+ }
+
+ b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8AA9);
+
+ if (!dev->phy.is_40mhz)
+ freq = 2500;
+ else
+ freq = 5000;
+
+ if (nphy->mphase_cal_phase_id > 2)
+ b43_nphy_run_samples(dev, (dev->phy.is_40mhz ? 40 : 20) * 8,
+ 0xFFFF, 0, true, false);
+ else
+ error = b43_nphy_tx_tone(dev, freq, 250, true, false);
+
+ if (error == 0) {
+ if (nphy->mphase_cal_phase_id > 2) {
+ table = nphy->mphase_txcal_bestcoeffs;
+ length = 11;
+ if (dev->phy.rev < 3)
+ length -= 2;
+ } else {
+ if (!full && nphy->txiqlocal_coeffsvalid) {
+ table = nphy->txiqlocal_bestc;
+ length = 11;
+ if (dev->phy.rev < 3)
+ length -= 2;
+ } else {
+ full = true;
+ if (dev->phy.rev >= 3) {
+ table = tbl_tx_iqlo_cal_startcoefs_nphyrev3;
+ length = B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3;
+ } else {
+ table = tbl_tx_iqlo_cal_startcoefs;
+ length = B43_NTAB_TX_IQLO_CAL_STARTCOEFS;
+ }
+ }
+ }
+
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 64), length, table);
+
+ if (full) {
+ if (dev->phy.rev >= 3)
+ max = B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL_REV3;
+ else
+ max = B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL;
+ } else {
+ if (dev->phy.rev >= 3)
+ max = B43_NTAB_TX_IQLO_CAL_CMDS_RECAL_REV3;
+ else
+ max = B43_NTAB_TX_IQLO_CAL_CMDS_RECAL;
+ }
+
+ if (mphase) {
+ count = nphy->mphase_txcal_cmdidx;
+ numb = min(max,
+ (u16)(count + nphy->mphase_txcal_numcmds));
+ } else {
+ count = 0;
+ numb = max;
+ }
+
+ for (; count < numb; count++) {
+ if (full) {
+ if (dev->phy.rev >= 3)
+ cmd = tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[count];
+ else
+ cmd = tbl_tx_iqlo_cal_cmds_fullcal[count];
+ } else {
+ if (dev->phy.rev >= 3)
+ cmd = tbl_tx_iqlo_cal_cmds_recal_nphyrev3[count];
+ else
+ cmd = tbl_tx_iqlo_cal_cmds_recal[count];
+ }
+
+ core = (cmd & 0x3000) >> 12;
+ type = (cmd & 0x0F00) >> 8;
+
+ if (phy6or5x && updated[core] == 0) {
+ b43_nphy_update_tx_cal_ladder(dev, core);
+ updated[core] = 1;
+ }
+
+ tmp = (params[core].ncorr[type] << 8) | 0x66;
+ b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDNNUM, tmp);
+
+ if (type == 1 || type == 3 || type == 4) {
+ buffer[0] = b43_ntab_read(dev,
+ B43_NTAB16(15, 69 + core));
+ diq_start = buffer[0];
+ buffer[0] = 0;
+ b43_ntab_write(dev, B43_NTAB16(15, 69 + core),
+ 0);
+ }
+
+ b43_phy_write(dev, B43_NPHY_IQLOCAL_CMD, cmd);
+ for (i = 0; i < 2000; i++) {
+ tmp = b43_phy_read(dev, B43_NPHY_IQLOCAL_CMD);
+ if (tmp & 0xC000)
+ break;
+ udelay(10);
+ }
+
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
+ buffer);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 64), length,
+ buffer);
+
+ if (type == 1 || type == 3 || type == 4)
+ buffer[0] = diq_start;
+ }
+
+ if (mphase)
+ nphy->mphase_txcal_cmdidx = (numb >= max) ? 0 : numb;
+
+ last = (dev->phy.rev < 3) ? 6 : 7;
+
+ if (!mphase || nphy->mphase_cal_phase_id == last) {
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 96), 4, buffer);
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 4, buffer);
+ if (dev->phy.rev < 3) {
+ buffer[0] = 0;
+ buffer[1] = 0;
+ buffer[2] = 0;
+ buffer[3] = 0;
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4,
+ buffer);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 101), 2,
+ buffer);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2,
+ buffer);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2,
+ buffer);
+ length = 11;
+ if (dev->phy.rev < 3)
+ length -= 2;
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
+ nphy->txiqlocal_bestc);
+ nphy->txiqlocal_coeffsvalid = true;
+ /* TODO: Set nphy->txiqlocal_chanspec to
+ the current channel */
+ } else {
+ length = 11;
+ if (dev->phy.rev < 3)
+ length -= 2;
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
+ nphy->mphase_txcal_bestcoeffs);
+ }
+
+ b43_nphy_stop_playback(dev);
+ b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0);
+ }
+
+ b43_nphy_tx_cal_phy_cleanup(dev);
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, save);
+
+ if (dev->phy.rev < 2 && (!mphase || nphy->mphase_cal_phase_id == last))
+ b43_nphy_tx_iq_workaround(dev);
+
+ if (dev->phy.rev >= 4)
+ nphy->hang_avoid = avoid;
+
+ b43_nphy_stay_in_carrier_search(dev, false);
+
+ return error;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIqRev2 */
+static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
+ struct nphy_txgains target, u8 type, bool debug)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i, j, index;
+ u8 rfctl[2];
+ u8 afectl_core;
+ u16 tmp[6];
+ u16 cur_hpf1, cur_hpf2, cur_lna;
+ u32 real, imag;
+ enum ieee80211_band band;
+
+ u8 use;
+ u16 cur_hpf;
+ u16 lna[3] = { 3, 3, 1 };
+ u16 hpf1[3] = { 7, 2, 0 };
+ u16 hpf2[3] = { 2, 0, 0 };
+ u32 power[3] = { };
+ u16 gain_save[2];
+ u16 cal_gain[2];
+ struct nphy_iqcal_params cal_params[2];
+ struct nphy_iq_est est;
+ int ret = 0;
+ bool playtone = true;
+ int desired = 13;
+
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ if (dev->phy.rev < 2)
+ ;/* TODO: Call N PHY Reapply TX Cal Coeffs */
+ b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, gain_save);
+ for (i = 0; i < 2; i++) {
+ b43_nphy_iq_cal_gain_params(dev, i, target, &cal_params[i]);
+ cal_gain[i] = cal_params[i].cal_gain;
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, cal_gain);
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0) {
+ rfctl[0] = B43_NPHY_RFCTL_INTC1;
+ rfctl[1] = B43_NPHY_RFCTL_INTC2;
+ afectl_core = B43_NPHY_AFECTL_C1;
+ } else {
+ rfctl[0] = B43_NPHY_RFCTL_INTC2;
+ rfctl[1] = B43_NPHY_RFCTL_INTC1;
+ afectl_core = B43_NPHY_AFECTL_C2;
+ }
+
+ tmp[1] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
+ tmp[2] = b43_phy_read(dev, afectl_core);
+ tmp[3] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ tmp[4] = b43_phy_read(dev, rfctl[0]);
+ tmp[5] = b43_phy_read(dev, rfctl[1]);
+
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
+ (u16)~B43_NPHY_RFSEQCA_RXDIS,
+ ((1 - i) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
+ (1 - i));
+ b43_phy_set(dev, afectl_core, 0x0006);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0006);
+
+ band = b43_current_band(dev->wl);
+
+ if (nphy->rxcalparams & 0xFF000000) {
+ if (band == IEEE80211_BAND_5GHZ)
+ b43_phy_write(dev, rfctl[0], 0x140);
+ else
+ b43_phy_write(dev, rfctl[0], 0x110);
+ } else {
+ if (band == IEEE80211_BAND_5GHZ)
+ b43_phy_write(dev, rfctl[0], 0x180);
+ else
+ b43_phy_write(dev, rfctl[0], 0x120);
+ }
+
+ if (band == IEEE80211_BAND_5GHZ)
+ b43_phy_write(dev, rfctl[1], 0x148);
+ else
+ b43_phy_write(dev, rfctl[1], 0x114);
+
+ if (nphy->rxcalparams & 0x10000) {
+ b43_radio_maskset(dev, B2055_C1_GENSPARE2, 0xFC,
+ (i + 1));
+ b43_radio_maskset(dev, B2055_C2_GENSPARE2, 0xFC,
+ (2 - i));
+ }
+
+		for (j = 0; j < 4; j++) {
+ if (j < 3) {
+ cur_lna = lna[j];
+ cur_hpf1 = hpf1[j];
+ cur_hpf2 = hpf2[j];
+ } else {
+ if (power[1] > 10000) {
+ use = 1;
+ cur_hpf = cur_hpf1;
+ index = 2;
+ } else {
+ if (power[0] > 10000) {
+ use = 1;
+ cur_hpf = cur_hpf1;
+ index = 1;
+ } else {
+ index = 0;
+ use = 2;
+ cur_hpf = cur_hpf2;
+ }
+ }
+ cur_lna = lna[index];
+ cur_hpf1 = hpf1[index];
+ cur_hpf2 = hpf2[index];
+ cur_hpf += desired - hweight32(power[index]);
+ cur_hpf = clamp_val(cur_hpf, 0, 10);
+ if (use == 1)
+ cur_hpf1 = cur_hpf;
+ else
+ cur_hpf2 = cur_hpf;
+ }
+
+ tmp[0] = ((cur_hpf2 << 8) | (cur_hpf1 << 4) |
+ (cur_lna << 2));
+ b43_nphy_rf_control_override(dev, 0x400, tmp[0], 3,
+ false);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ b43_nphy_stop_playback(dev);
+
+ if (playtone) {
+ ret = b43_nphy_tx_tone(dev, 4000,
+ (nphy->rxcalparams & 0xFFFF),
+ false, false);
+ playtone = false;
+ } else {
+ b43_nphy_run_samples(dev, 160, 0xFFFF, 0,
+ false, false);
+ }
+
+ if (ret == 0) {
+ if (j < 3) {
+ b43_nphy_rx_iq_est(dev, &est, 1024, 32,
+ false);
+ if (i == 0) {
+ real = est.i0_pwr;
+ imag = est.q0_pwr;
+ } else {
+ real = est.i1_pwr;
+ imag = est.q1_pwr;
+ }
+ power[i] = ((real + imag) / 1024) + 1;
+ } else {
+ b43_nphy_calc_rx_iq_comp(dev, 1 << i);
+ }
+ b43_nphy_stop_playback(dev);
+ }
+
+ if (ret != 0)
+ break;
+ }
+
+ b43_radio_mask(dev, B2055_C1_GENSPARE2, 0xFC);
+ b43_radio_mask(dev, B2055_C2_GENSPARE2, 0xFC);
+ b43_phy_write(dev, rfctl[1], tmp[5]);
+ b43_phy_write(dev, rfctl[0], tmp[4]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp[3]);
+ b43_phy_write(dev, afectl_core, tmp[2]);
+ b43_phy_write(dev, B43_NPHY_RFSEQCA, tmp[1]);
+
+ if (ret != 0)
+ break;
+ }
+
+ b43_nphy_rf_control_override(dev, 0x400, 0, 3, true);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, gain_save);
+
+ b43_nphy_stay_in_carrier_search(dev, 0);
+
+ return ret;
+}
+
+static int b43_nphy_rev3_cal_rx_iq(struct b43_wldev *dev,
+ struct nphy_txgains target, u8 type, bool debug)
+{
+ return -1;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIq */
+static int b43_nphy_cal_rx_iq(struct b43_wldev *dev,
+ struct nphy_txgains target, u8 type, bool debug)
+{
+ if (dev->phy.rev >= 3)
+ return b43_nphy_rev3_cal_rx_iq(dev, target, type, debug);
+ else
+ return b43_nphy_rev2_cal_rx_iq(dev, target, type, debug);
+}
+
+/*
+ * Init N-PHY
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N
+ */
int b43_phy_initn(struct b43_wldev *dev)
{
+ struct ssb_bus *bus = dev->dev->bus;
struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = phy->n;
+ u8 tx_pwr_state;
+ struct nphy_txgains target;
u16 tmp;
+ enum ieee80211_band tmp2;
+ bool do_rssi_cal;
- //TODO: Spectral management
+ u16 clip[2];
+ bool do_cal = false;
+
+ if ((dev->phy.rev >= 3) &&
+ (bus->sprom.boardflags_lo & B43_BFL_EXTLNA) &&
+ (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) {
+ chipco_set32(&dev->dev->bus->chipco, SSB_CHIPCO_CHIPCTL, 0x40);
+ }
+ nphy->deaf_count = 0;
b43_nphy_tables_init(dev);
+ nphy->crsminpwr_adjusted = false;
+ nphy->noisevars_adjusted = false;
/* Clear all overrides */
- b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0);
+ if (dev->phy.rev >= 3) {
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, 0);
+ b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, 0);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, 0);
+ } else {
+ b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0);
+ }
b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, 0);
b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, 0);
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC3, 0);
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC4, 0);
+ if (dev->phy.rev < 6) {
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC3, 0);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC4, 0);
+ }
b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
~(B43_NPHY_RFSEQMODE_CAOVER |
B43_NPHY_RFSEQMODE_TROVER));
+ if (dev->phy.rev >= 3)
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, 0);
b43_phy_write(dev, B43_NPHY_AFECTL_OVER, 0);
- tmp = (phy->rev < 2) ? 64 : 59;
- b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
- ~B43_NPHY_BPHY_CTL3_SCALE,
- tmp << B43_NPHY_BPHY_CTL3_SCALE_SHIFT);
-
+ if (dev->phy.rev <= 2) {
+ tmp = (dev->phy.rev == 2) ? 0x3B : 0x40;
+ b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
+ ~B43_NPHY_BPHY_CTL3_SCALE,
+ tmp << B43_NPHY_BPHY_CTL3_SCALE_SHIFT);
+ }
b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_20M, 0x20);
b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_40M, 0x20);
- b43_phy_write(dev, B43_NPHY_TXREALFD, 184);
- b43_phy_write(dev, B43_NPHY_MIMO_CRSTXEXT, 200);
- b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 80);
- b43_phy_write(dev, B43_NPHY_C2_BCLIPBKOFF, 511);
+ if (bus->sprom.boardflags2_lo & 0x100 ||
+ (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE &&
+ bus->boardinfo.type == 0x8B))
+ b43_phy_write(dev, B43_NPHY_TXREALFD, 0xA0);
+ else
+ b43_phy_write(dev, B43_NPHY_TXREALFD, 0xB8);
+ b43_phy_write(dev, B43_NPHY_MIMO_CRSTXEXT, 0xC8);
+ b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x50);
+ b43_phy_write(dev, B43_NPHY_TXRIFS_FRDEL, 0x30);
- //TODO MIMO-Config
- //TODO Update TX/RX chain
+ b43_nphy_update_mimo_config(dev, nphy->preamble_override);
+ b43_nphy_update_txrx_chain(dev);
if (phy->rev < 2) {
b43_phy_write(dev, B43_NPHY_DUP40_GFBL, 0xAA8);
b43_phy_write(dev, B43_NPHY_DUP40_BL, 0x9A4);
}
+
+ tmp2 = b43_current_band(dev->wl);
+ if ((nphy->ipa2g_on && tmp2 == IEEE80211_BAND_2GHZ) ||
+ (nphy->ipa5g_on && tmp2 == IEEE80211_BAND_5GHZ)) {
+ b43_phy_set(dev, B43_NPHY_PAPD_EN0, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ0, 0x007F,
+ nphy->papd_epsilon_offset[0] << 7);
+ b43_phy_set(dev, B43_NPHY_PAPD_EN1, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ1, 0x007F,
+ nphy->papd_epsilon_offset[1] << 7);
+ b43_nphy_int_pa_set_tx_dig_filters(dev);
+ } else if (phy->rev >= 5) {
+ b43_nphy_ext_pa_set_tx_dig_filters(dev);
+ }
+
b43_nphy_workarounds(dev);
- b43_nphy_reset_cca(dev);
- ssb_write32(dev->dev, SSB_TMSLOW,
- ssb_read32(dev->dev, SSB_TMSLOW) | B43_TMSLOW_MACPHYCLKEN);
+	/* Reset CCA; in the init code this differs a little from the standard way */
+ b43_nphy_bmac_clock_fgc(dev, 1);
+ tmp = b43_phy_read(dev, B43_NPHY_BBCFG);
+ b43_phy_write(dev, B43_NPHY_BBCFG, tmp | B43_NPHY_BBCFG_RSTCCA);
+ b43_phy_write(dev, B43_NPHY_BBCFG, tmp & ~B43_NPHY_BBCFG_RSTCCA);
+ b43_nphy_bmac_clock_fgc(dev, 0);
+
+ /* TODO N PHY MAC PHY Clock Set with argument 1 */
+
+ b43_nphy_pa_override(dev, false);
b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ b43_nphy_pa_override(dev, true);
+
+ b43_nphy_classifier(dev, 0, 0);
+ b43_nphy_read_clip_detection(dev, clip);
+ tx_pwr_state = nphy->txpwrctrl;
+ /* TODO N PHY TX power control with argument 0
+ (turning off power control) */
+ /* TODO Fix the TX Power Settings */
+ /* TODO N PHY TX Power Control Idle TSSI */
+ /* TODO N PHY TX Power Control Setup */
+
+ if (phy->rev >= 3) {
+ /* TODO */
+ } else {
+ b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128,
+ b43_ntab_tx_gain_rev0_1_2);
+ b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128,
+ b43_ntab_tx_gain_rev0_1_2);
+ }
+
+ if (nphy->phyrxchain != 3)
+ ;/* TODO N PHY RX Core Set State with phyrxchain as argument */
+ if (nphy->mphase_cal_phase_id > 0)
+ ;/* TODO PHY Periodic Calibration Multi-Phase Restart */
+
+ do_rssi_cal = false;
+ if (phy->rev >= 3) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ do_rssi_cal = (nphy->rssical_chanspec_2G == 0);
+ else
+ do_rssi_cal = (nphy->rssical_chanspec_5G == 0);
+
+ if (do_rssi_cal)
+ b43_nphy_rssi_cal(dev);
+ else
+ b43_nphy_restore_rssi_cal(dev);
+ } else {
+ b43_nphy_rssi_cal(dev);
+ }
+
+	if (!(nphy->measure_hold & 0x6)) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ do_cal = (nphy->iqcal_chanspec_2G == 0);
+ else
+ do_cal = (nphy->iqcal_chanspec_5G == 0);
+
+ if (nphy->mute)
+ do_cal = false;
+
+ if (do_cal) {
+ target = b43_nphy_get_tx_gains(dev);
+
+ if (nphy->antsel_type == 2)
+ ;/*TODO NPHY Superswitch Init with argument 1*/
+ if (nphy->perical != 2) {
+ b43_nphy_rssi_cal(dev);
+ if (phy->rev >= 3) {
+ nphy->cal_orig_pwr_idx[0] =
+ nphy->txpwrindex[0].index_internal;
+ nphy->cal_orig_pwr_idx[1] =
+ nphy->txpwrindex[1].index_internal;
+ /* TODO N PHY Pre Calibrate TX Gain */
+ target = b43_nphy_get_tx_gains(dev);
+ }
+ }
+ }
+ }
+
+ if (!b43_nphy_cal_tx_iq_lo(dev, target, true, false)) {
+ if (b43_nphy_cal_rx_iq(dev, target, 2, 0) == 0)
+ ;/* Call N PHY Save Cal */
+ else if (nphy->mphase_cal_phase_id == 0)
+ ;/* N PHY Periodic Calibration with argument 3 */
+ } else {
+ b43_nphy_restore_cal(dev);
+ }
- b43_phy_read(dev, B43_NPHY_CLASSCTL); /* dummy read */
- //TODO read core1/2 clip1 thres regs
-
- if (1 /* FIXME Band is 2.4GHz */)
- b43_nphy_bphy_init(dev);
- //TODO disable TX power control
- //TODO Fix the TX power settings
- //TODO Init periodic calibration with reason 3
- b43_nphy_rssi_cal(dev, 2);
- b43_nphy_rssi_cal(dev, 0);
- b43_nphy_rssi_cal(dev, 1);
- //TODO get TX gain
- //TODO init superswitch
- //TODO calibrate LO
- //TODO idle TSSI TX pctl
- //TODO TX power control power setup
- //TODO table writes
- //TODO TX power control coefficients
- //TODO enable TX power control
- //TODO control antenna selection
- //TODO init radar detection
- //TODO reset channel if changed
+ b43_nphy_tx_pwr_ctrl_coef_setup(dev);
+ /* TODO N PHY TX Power Control Enable with argument tx_pwr_state */
+ b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015);
+ b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320);
+ if (phy->rev >= 3 && phy->rev <= 6)
+ b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0014);
+ b43_nphy_tx_lp_fbw(dev);
+ /* TODO N PHY Spur Workaround */
b43err(dev->wl, "IEEE 802.11n devices are not supported, yet.\n");
return 0;
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index 1749aef4147d..ae82f0fc2096 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -231,6 +231,7 @@
#define B43_NPHY_C2_TXIQ_COMP_OFF B43_PHY_N(0x088) /* Core 2 TX I/Q comp offset */
#define B43_NPHY_C1_TXCTL B43_PHY_N(0x08B) /* Core 1 TX control */
#define B43_NPHY_C2_TXCTL B43_PHY_N(0x08C) /* Core 2 TX control */
+#define B43_NPHY_AFECTL_OVER1 B43_PHY_N(0x08F) /* AFE control override 1 */
#define B43_NPHY_SCRAM_SIGCTL B43_PHY_N(0x090) /* Scram signal control */
#define B43_NPHY_SCRAM_SIGCTL_INITST 0x007F /* Initial state value */
#define B43_NPHY_SCRAM_SIGCTL_INITST_SHIFT 0
@@ -705,6 +706,10 @@
#define B43_NPHY_TXPCTL_INIT B43_PHY_N(0x222) /* TX power controll init */
#define B43_NPHY_TXPCTL_INIT_PIDXI1 0x00FF /* Power index init 1 */
#define B43_NPHY_TXPCTL_INIT_PIDXI1_SHIFT 0
+#define B43_NPHY_PAPD_EN0 B43_PHY_N(0x297) /* PAPD Enable0 TBD */
+#define B43_NPHY_EPS_TABLE_ADJ0 B43_PHY_N(0x298) /* EPS Table Adj0 TBD */
+#define B43_NPHY_PAPD_EN1 B43_PHY_N(0x29B) /* PAPD Enable1 TBD */
+#define B43_NPHY_EPS_TABLE_ADJ1 B43_PHY_N(0x29C) /* EPS Table Adj1 TBD */
@@ -919,8 +924,95 @@
struct b43_wldev;
+struct b43_phy_n_iq_comp {
+ s16 a0;
+ s16 b0;
+ s16 a1;
+ s16 b1;
+};
+
+struct b43_phy_n_rssical_cache {
+ u16 rssical_radio_regs_2G[2];
+ u16 rssical_phy_regs_2G[12];
+
+ u16 rssical_radio_regs_5G[2];
+ u16 rssical_phy_regs_5G[12];
+};
+
+struct b43_phy_n_cal_cache {
+ u16 txcal_radio_regs_2G[8];
+ u16 txcal_coeffs_2G[8];
+ struct b43_phy_n_iq_comp rxcal_coeffs_2G;
+
+ u16 txcal_radio_regs_5G[8];
+ u16 txcal_coeffs_5G[8];
+ struct b43_phy_n_iq_comp rxcal_coeffs_5G;
+};
+
+struct b43_phy_n_txpwrindex {
+ s8 index;
+ s8 index_internal;
+ s8 index_internal_save;
+ u16 AfectrlOverride;
+ u16 AfeCtrlDacGain;
+ u16 rad_gain;
+ u8 bbmult;
+ u16 iqcomp_a;
+ u16 iqcomp_b;
+ u16 locomp;
+};
+
struct b43_phy_n {
- //TODO lots of missing stuff
+ u8 antsel_type;
+ u8 cal_orig_pwr_idx[2];
+ u8 measure_hold;
+ u8 phyrxchain;
+ u8 perical;
+ u32 deaf_count;
+ u32 rxcalparams;
+ bool hang_avoid;
+ bool mute;
+ u16 papd_epsilon_offset[2];
+ s32 preamble_override;
+ u32 bb_mult_save;
+
+ bool gain_boost;
+ bool elna_gain_config;
+ bool band5g_pwrgain;
+
+ u8 mphase_cal_phase_id;
+ u16 mphase_txcal_cmdidx;
+ u16 mphase_txcal_numcmds;
+ u16 mphase_txcal_bestcoeffs[11];
+
+ u8 txpwrctrl;
+ u16 txcal_bbmult;
+ u16 txiqlocal_bestc[11];
+ bool txiqlocal_coeffsvalid;
+ struct b43_phy_n_txpwrindex txpwrindex[2];
+
+ u8 txrx_chain;
+ u16 tx_rx_cal_phy_saveregs[11];
+ u16 tx_rx_cal_radio_saveregs[22];
+
+ u16 rfctrl_intc1_save;
+ u16 rfctrl_intc2_save;
+
+ u16 classifier_state;
+ u16 clip_state[2];
+
+ bool ipa2g_on;
+ u8 iqcal_chanspec_2G;
+ u8 rssical_chanspec_2G;
+
+ bool ipa5g_on;
+ u8 iqcal_chanspec_5G;
+ u8 rssical_chanspec_5G;
+
+ struct b43_phy_n_rssical_cache rssical_cache;
+ struct b43_phy_n_cal_cache cal_cache;
+ bool crsminpwr_adjusted;
+ bool noisevars_adjusted;
};
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 4e2336315545..a00d509150f7 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -1336,7 +1336,7 @@ b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel)
}
-const u8 b43_ntab_adjustpower0[] = {
+static const u8 b43_ntab_adjustpower0[] = {
0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03,
0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05,
@@ -1355,7 +1355,7 @@ const u8 b43_ntab_adjustpower0[] = {
0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F,
};
-const u8 b43_ntab_adjustpower1[] = {
+static const u8 b43_ntab_adjustpower1[] = {
0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03,
0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05,
@@ -1374,11 +1374,11 @@ const u8 b43_ntab_adjustpower1[] = {
0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F,
};
-const u16 b43_ntab_bdi[] = {
+static const u16 b43_ntab_bdi[] = {
0x0070, 0x0126, 0x012C, 0x0246, 0x048D, 0x04D2,
};
-const u32 b43_ntab_channelest[] = {
+static const u32 b43_ntab_channelest[] = {
0x44444444, 0x44444444, 0x44444444, 0x44444444,
0x44444444, 0x44444444, 0x44444444, 0x44444444,
0x10101010, 0x10101010, 0x10101010, 0x10101010,
@@ -1405,7 +1405,7 @@ const u32 b43_ntab_channelest[] = {
0x10101010, 0x10101010, 0x10101010, 0x10101010,
};
-const u8 b43_ntab_estimatepowerlt0[] = {
+static const u8 b43_ntab_estimatepowerlt0[] = {
0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49,
0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41,
0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39,
@@ -1416,7 +1416,7 @@ const u8 b43_ntab_estimatepowerlt0[] = {
0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11,
};
-const u8 b43_ntab_estimatepowerlt1[] = {
+static const u8 b43_ntab_estimatepowerlt1[] = {
0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49,
0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41,
0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39,
@@ -1427,14 +1427,14 @@ const u8 b43_ntab_estimatepowerlt1[] = {
0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11,
};
-const u8 b43_ntab_framelookup[] = {
+static const u8 b43_ntab_framelookup[] = {
0x02, 0x04, 0x14, 0x14, 0x03, 0x05, 0x16, 0x16,
0x0A, 0x0C, 0x1C, 0x1C, 0x0B, 0x0D, 0x1E, 0x1E,
0x06, 0x08, 0x18, 0x18, 0x07, 0x09, 0x1A, 0x1A,
0x0E, 0x10, 0x20, 0x28, 0x0F, 0x11, 0x22, 0x2A,
};
-const u32 b43_ntab_framestruct[] = {
+static const u32 b43_ntab_framestruct[] = {
0x08004A04, 0x00100000, 0x01000A05, 0x00100020,
0x09804506, 0x00100030, 0x09804507, 0x00100030,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -1645,7 +1645,7 @@ const u32 b43_ntab_framestruct[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
-const u32 b43_ntab_gainctl0[] = {
+static const u32 b43_ntab_gainctl0[] = {
0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E,
0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C,
0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A,
@@ -1680,7 +1680,7 @@ const u32 b43_ntab_gainctl0[] = {
0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00,
};
-const u32 b43_ntab_gainctl1[] = {
+static const u32 b43_ntab_gainctl1[] = {
0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E,
0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C,
0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A,
@@ -1715,12 +1715,12 @@ const u32 b43_ntab_gainctl1[] = {
0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00,
};
-const u32 b43_ntab_intlevel[] = {
+static const u32 b43_ntab_intlevel[] = {
0x00802070, 0x0671188D, 0x0A60192C, 0x0A300E46,
0x00C1188D, 0x080024D2, 0x00000070,
};
-const u32 b43_ntab_iqlt0[] = {
+static const u32 b43_ntab_iqlt0[] = {
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
@@ -1755,7 +1755,7 @@ const u32 b43_ntab_iqlt0[] = {
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
};
-const u32 b43_ntab_iqlt1[] = {
+static const u32 b43_ntab_iqlt1[] = {
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
@@ -1790,7 +1790,7 @@ const u32 b43_ntab_iqlt1[] = {
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
};
-const u16 b43_ntab_loftlt0[] = {
+static const u16 b43_ntab_loftlt0[] = {
0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103,
0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
@@ -1815,7 +1815,7 @@ const u16 b43_ntab_loftlt0[] = {
0x0002, 0x0103,
};
-const u16 b43_ntab_loftlt1[] = {
+static const u16 b43_ntab_loftlt1[] = {
0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103,
0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
@@ -1840,7 +1840,7 @@ const u16 b43_ntab_loftlt1[] = {
0x0002, 0x0103,
};
-const u8 b43_ntab_mcs[] = {
+static const u8 b43_ntab_mcs[] = {
0x00, 0x08, 0x0A, 0x10, 0x12, 0x19, 0x1A, 0x1C,
0x40, 0x48, 0x4A, 0x50, 0x52, 0x59, 0x5A, 0x5C,
0x80, 0x88, 0x8A, 0x90, 0x92, 0x99, 0x9A, 0x9C,
@@ -1859,7 +1859,7 @@ const u8 b43_ntab_mcs[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
-const u32 b43_ntab_noisevar10[] = {
+static const u32 b43_ntab_noisevar10[] = {
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
@@ -1926,7 +1926,7 @@ const u32 b43_ntab_noisevar10[] = {
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
};
-const u32 b43_ntab_noisevar11[] = {
+static const u32 b43_ntab_noisevar11[] = {
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
@@ -1993,7 +1993,7 @@ const u32 b43_ntab_noisevar11[] = {
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
};
-const u16 b43_ntab_pilot[] = {
+static const u16 b43_ntab_pilot[] = {
0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08,
0xFF08, 0xFF08, 0x80D5, 0x80D5, 0x80D5, 0x80D5,
0x80D5, 0x80D5, 0x80D5, 0x80D5, 0xFF0A, 0xFF82,
@@ -2011,12 +2011,12 @@ const u16 b43_ntab_pilot[] = {
0xF0A0, 0xF028, 0xFFFF, 0xFFFF,
};
-const u32 b43_ntab_pilotlt[] = {
+static const u32 b43_ntab_pilotlt[] = {
0x76540123, 0x62407351, 0x76543201, 0x76540213,
0x76540123, 0x76430521,
};
-const u32 b43_ntab_tdi20a0[] = {
+static const u32 b43_ntab_tdi20a0[] = {
0x00091226, 0x000A1429, 0x000B56AD, 0x000C58B0,
0x000D5AB3, 0x000E9CB6, 0x000F9EBA, 0x0000C13D,
0x00020301, 0x00030504, 0x00040708, 0x0005090B,
@@ -2033,7 +2033,7 @@ const u32 b43_ntab_tdi20a0[] = {
0x00000000, 0x00000000, 0x00000000,
};
-const u32 b43_ntab_tdi20a1[] = {
+static const u32 b43_ntab_tdi20a1[] = {
0x00014B26, 0x00028D29, 0x000393AD, 0x00049630,
0x0005D833, 0x0006DA36, 0x00099C3A, 0x000A9E3D,
0x000BC081, 0x000CC284, 0x000DC488, 0x000F068B,
@@ -2050,7 +2050,7 @@ const u32 b43_ntab_tdi20a1[] = {
0x00000000, 0x00000000, 0x00000000,
};
-const u32 b43_ntab_tdi40a0[] = {
+static const u32 b43_ntab_tdi40a0[] = {
0x0011A346, 0x00136CCF, 0x0014F5D9, 0x001641E2,
0x0017CB6B, 0x00195475, 0x001B2383, 0x001CAD0C,
0x001E7616, 0x0000821F, 0x00020BA8, 0x0003D4B2,
@@ -2081,7 +2081,7 @@ const u32 b43_ntab_tdi40a0[] = {
0x00000000, 0x00000000,
};
-const u32 b43_ntab_tdi40a1[] = {
+static const u32 b43_ntab_tdi40a1[] = {
0x001EDB36, 0x000129CA, 0x0002B353, 0x00047CDD,
0x0005C8E6, 0x000791EF, 0x00091BF9, 0x000AAA07,
0x000C3391, 0x000DFD1A, 0x00120923, 0x0013D22D,
@@ -2112,7 +2112,7 @@ const u32 b43_ntab_tdi40a1[] = {
0x00000000, 0x00000000,
};
-const u32 b43_ntab_tdtrn[] = {
+static const u32 b43_ntab_tdtrn[] = {
0x061C061C, 0x0050EE68, 0xF592FE36, 0xFE5212F6,
0x00000C38, 0xFE5212F6, 0xF592FE36, 0x0050EE68,
0x061C061C, 0xEE680050, 0xFE36F592, 0x12F6FE52,
@@ -2291,7 +2291,7 @@ const u32 b43_ntab_tdtrn[] = {
0xFA58FC00, 0x0B64FC7E, 0x0800F7B6, 0x00F006BE,
};
-const u32 b43_ntab_tmap[] = {
+static const u32 b43_ntab_tmap[] = {
0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888,
0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8,
0xF1111110, 0x11111111, 0x11F11111, 0x00000111,
@@ -2406,6 +2406,544 @@ const u32 b43_ntab_tmap[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+const u32 b43_ntab_tx_gain_rev0_1_2[] = {
+ 0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
+ 0x03cc2944, 0x03c82b44, 0x03c82b42, 0x03c82a44,
+ 0x03c82a42, 0x03c82944, 0x03c82942, 0x03c82844,
+ 0x03c82842, 0x03c42b44, 0x03c42b42, 0x03c42a44,
+ 0x03c42a42, 0x03c42944, 0x03c42942, 0x03c42844,
+ 0x03c42842, 0x03c42744, 0x03c42742, 0x03c42644,
+ 0x03c42642, 0x03c42544, 0x03c42542, 0x03c42444,
+ 0x03c42442, 0x03c02b44, 0x03c02b42, 0x03c02a44,
+ 0x03c02a42, 0x03c02944, 0x03c02942, 0x03c02844,
+ 0x03c02842, 0x03c02744, 0x03c02742, 0x03b02b44,
+ 0x03b02b42, 0x03b02a44, 0x03b02a42, 0x03b02944,
+ 0x03b02942, 0x03b02844, 0x03b02842, 0x03b02744,
+ 0x03b02742, 0x03b02644, 0x03b02642, 0x03b02544,
+ 0x03b02542, 0x03a02b44, 0x03a02b42, 0x03a02a44,
+ 0x03a02a42, 0x03a02944, 0x03a02942, 0x03a02844,
+ 0x03a02842, 0x03a02744, 0x03a02742, 0x03902b44,
+ 0x03902b42, 0x03902a44, 0x03902a42, 0x03902944,
+ 0x03902942, 0x03902844, 0x03902842, 0x03902744,
+ 0x03902742, 0x03902644, 0x03902642, 0x03902544,
+ 0x03902542, 0x03802b44, 0x03802b42, 0x03802a44,
+ 0x03802a42, 0x03802944, 0x03802942, 0x03802844,
+ 0x03802842, 0x03802744, 0x03802742, 0x03802644,
+ 0x03802642, 0x03802544, 0x03802542, 0x03802444,
+ 0x03802442, 0x03802344, 0x03802342, 0x03802244,
+ 0x03802242, 0x03802144, 0x03802142, 0x03802044,
+ 0x03802042, 0x03801f44, 0x03801f42, 0x03801e44,
+ 0x03801e42, 0x03801d44, 0x03801d42, 0x03801c44,
+ 0x03801c42, 0x03801b44, 0x03801b42, 0x03801a44,
+ 0x03801a42, 0x03801944, 0x03801942, 0x03801844,
+ 0x03801842, 0x03801744, 0x03801742, 0x03801644,
+ 0x03801642, 0x03801544, 0x03801542, 0x03801444,
+ 0x03801442, 0x03801344, 0x03801342, 0x00002b00,
+};
+
+const u32 b43_ntab_tx_gain_rev3plus_2ghz[] = {
+ 0x1f410044, 0x1f410042, 0x1f410040, 0x1f41003e,
+ 0x1f41003c, 0x1f41003b, 0x1f410039, 0x1f410037,
+ 0x1e410044, 0x1e410042, 0x1e410040, 0x1e41003e,
+ 0x1e41003c, 0x1e41003b, 0x1e410039, 0x1e410037,
+ 0x1d410044, 0x1d410042, 0x1d410040, 0x1d41003e,
+ 0x1d41003c, 0x1d41003b, 0x1d410039, 0x1d410037,
+ 0x1c410044, 0x1c410042, 0x1c410040, 0x1c41003e,
+ 0x1c41003c, 0x1c41003b, 0x1c410039, 0x1c410037,
+ 0x1b410044, 0x1b410042, 0x1b410040, 0x1b41003e,
+ 0x1b41003c, 0x1b41003b, 0x1b410039, 0x1b410037,
+ 0x1a410044, 0x1a410042, 0x1a410040, 0x1a41003e,
+ 0x1a41003c, 0x1a41003b, 0x1a410039, 0x1a410037,
+ 0x19410044, 0x19410042, 0x19410040, 0x1941003e,
+ 0x1941003c, 0x1941003b, 0x19410039, 0x19410037,
+ 0x18410044, 0x18410042, 0x18410040, 0x1841003e,
+ 0x1841003c, 0x1841003b, 0x18410039, 0x18410037,
+ 0x17410044, 0x17410042, 0x17410040, 0x1741003e,
+ 0x1741003c, 0x1741003b, 0x17410039, 0x17410037,
+ 0x16410044, 0x16410042, 0x16410040, 0x1641003e,
+ 0x1641003c, 0x1641003b, 0x16410039, 0x16410037,
+ 0x15410044, 0x15410042, 0x15410040, 0x1541003e,
+ 0x1541003c, 0x1541003b, 0x15410039, 0x15410037,
+ 0x14410044, 0x14410042, 0x14410040, 0x1441003e,
+ 0x1441003c, 0x1441003b, 0x14410039, 0x14410037,
+ 0x13410044, 0x13410042, 0x13410040, 0x1341003e,
+ 0x1341003c, 0x1341003b, 0x13410039, 0x13410037,
+ 0x12410044, 0x12410042, 0x12410040, 0x1241003e,
+ 0x1241003c, 0x1241003b, 0x12410039, 0x12410037,
+ 0x11410044, 0x11410042, 0x11410040, 0x1141003e,
+ 0x1141003c, 0x1141003b, 0x11410039, 0x11410037,
+ 0x10410044, 0x10410042, 0x10410040, 0x1041003e,
+ 0x1041003c, 0x1041003b, 0x10410039, 0x10410037,
+};
+
+const u32 b43_ntab_tx_gain_rev3_5ghz[] = {
+ 0xcff70044, 0xcff70042, 0xcff70040, 0xcff7003e,
+ 0xcff7003c, 0xcff7003b, 0xcff70039, 0xcff70037,
+ 0xcef70044, 0xcef70042, 0xcef70040, 0xcef7003e,
+ 0xcef7003c, 0xcef7003b, 0xcef70039, 0xcef70037,
+ 0xcdf70044, 0xcdf70042, 0xcdf70040, 0xcdf7003e,
+ 0xcdf7003c, 0xcdf7003b, 0xcdf70039, 0xcdf70037,
+ 0xccf70044, 0xccf70042, 0xccf70040, 0xccf7003e,
+ 0xccf7003c, 0xccf7003b, 0xccf70039, 0xccf70037,
+ 0xcbf70044, 0xcbf70042, 0xcbf70040, 0xcbf7003e,
+ 0xcbf7003c, 0xcbf7003b, 0xcbf70039, 0xcbf70037,
+ 0xcaf70044, 0xcaf70042, 0xcaf70040, 0xcaf7003e,
+ 0xcaf7003c, 0xcaf7003b, 0xcaf70039, 0xcaf70037,
+ 0xc9f70044, 0xc9f70042, 0xc9f70040, 0xc9f7003e,
+ 0xc9f7003c, 0xc9f7003b, 0xc9f70039, 0xc9f70037,
+ 0xc8f70044, 0xc8f70042, 0xc8f70040, 0xc8f7003e,
+ 0xc8f7003c, 0xc8f7003b, 0xc8f70039, 0xc8f70037,
+ 0xc7f70044, 0xc7f70042, 0xc7f70040, 0xc7f7003e,
+ 0xc7f7003c, 0xc7f7003b, 0xc7f70039, 0xc7f70037,
+ 0xc6f70044, 0xc6f70042, 0xc6f70040, 0xc6f7003e,
+ 0xc6f7003c, 0xc6f7003b, 0xc6f70039, 0xc6f70037,
+ 0xc5f70044, 0xc5f70042, 0xc5f70040, 0xc5f7003e,
+ 0xc5f7003c, 0xc5f7003b, 0xc5f70039, 0xc5f70037,
+ 0xc4f70044, 0xc4f70042, 0xc4f70040, 0xc4f7003e,
+ 0xc4f7003c, 0xc4f7003b, 0xc4f70039, 0xc4f70037,
+ 0xc3f70044, 0xc3f70042, 0xc3f70040, 0xc3f7003e,
+ 0xc3f7003c, 0xc3f7003b, 0xc3f70039, 0xc3f70037,
+ 0xc2f70044, 0xc2f70042, 0xc2f70040, 0xc2f7003e,
+ 0xc2f7003c, 0xc2f7003b, 0xc2f70039, 0xc2f70037,
+ 0xc1f70044, 0xc1f70042, 0xc1f70040, 0xc1f7003e,
+ 0xc1f7003c, 0xc1f7003b, 0xc1f70039, 0xc1f70037,
+ 0xc0f70044, 0xc0f70042, 0xc0f70040, 0xc0f7003e,
+ 0xc0f7003c, 0xc0f7003b, 0xc0f70039, 0xc0f70037,
+};
+
+const u32 b43_ntab_tx_gain_rev4_5ghz[] = {
+ 0x2ff20044, 0x2ff20042, 0x2ff20040, 0x2ff2003e,
+ 0x2ff2003c, 0x2ff2003b, 0x2ff20039, 0x2ff20037,
+ 0x2ef20044, 0x2ef20042, 0x2ef20040, 0x2ef2003e,
+ 0x2ef2003c, 0x2ef2003b, 0x2ef20039, 0x2ef20037,
+ 0x2df20044, 0x2df20042, 0x2df20040, 0x2df2003e,
+ 0x2df2003c, 0x2df2003b, 0x2df20039, 0x2df20037,
+ 0x2cf20044, 0x2cf20042, 0x2cf20040, 0x2cf2003e,
+ 0x2cf2003c, 0x2cf2003b, 0x2cf20039, 0x2cf20037,
+ 0x2bf20044, 0x2bf20042, 0x2bf20040, 0x2bf2003e,
+ 0x2bf2003c, 0x2bf2003b, 0x2bf20039, 0x2bf20037,
+ 0x2af20044, 0x2af20042, 0x2af20040, 0x2af2003e,
+ 0x2af2003c, 0x2af2003b, 0x2af20039, 0x2af20037,
+ 0x29f20044, 0x29f20042, 0x29f20040, 0x29f2003e,
+ 0x29f2003c, 0x29f2003b, 0x29f20039, 0x29f20037,
+ 0x28f20044, 0x28f20042, 0x28f20040, 0x28f2003e,
+ 0x28f2003c, 0x28f2003b, 0x28f20039, 0x28f20037,
+ 0x27f20044, 0x27f20042, 0x27f20040, 0x27f2003e,
+ 0x27f2003c, 0x27f2003b, 0x27f20039, 0x27f20037,
+ 0x26f20044, 0x26f20042, 0x26f20040, 0x26f2003e,
+ 0x26f2003c, 0x26f2003b, 0x26f20039, 0x26f20037,
+ 0x25f20044, 0x25f20042, 0x25f20040, 0x25f2003e,
+ 0x25f2003c, 0x25f2003b, 0x25f20039, 0x25f20037,
+ 0x24f20044, 0x24f20042, 0x24f20040, 0x24f2003e,
+ 0x24f2003c, 0x24f2003b, 0x24f20039, 0x24f20038,
+ 0x23f20041, 0x23f20040, 0x23f2003f, 0x23f2003e,
+ 0x23f2003c, 0x23f2003b, 0x23f20039, 0x23f20037,
+ 0x22f20044, 0x22f20042, 0x22f20040, 0x22f2003e,
+ 0x22f2003c, 0x22f2003b, 0x22f20039, 0x22f20037,
+ 0x21f20044, 0x21f20042, 0x21f20040, 0x21f2003e,
+ 0x21f2003c, 0x21f2003b, 0x21f20039, 0x21f20037,
+ 0x20d20043, 0x20d20041, 0x20d2003e, 0x20d2003c,
+ 0x20d2003a, 0x20d20038, 0x20d20036, 0x20d20034,
+};
+
+const u32 b43_ntab_tx_gain_rev5plus_5ghz[] = {
+ 0x0f62004a, 0x0f620048, 0x0f620046, 0x0f620044,
+ 0x0f620042, 0x0f620040, 0x0f62003e, 0x0f62003c,
+ 0x0e620044, 0x0e620042, 0x0e620040, 0x0e62003e,
+ 0x0e62003c, 0x0e62003d, 0x0e62003b, 0x0e62003a,
+ 0x0d620043, 0x0d620041, 0x0d620040, 0x0d62003e,
+ 0x0d62003d, 0x0d62003c, 0x0d62003b, 0x0d62003a,
+ 0x0c620041, 0x0c620040, 0x0c62003f, 0x0c62003e,
+ 0x0c62003c, 0x0c62003b, 0x0c620039, 0x0c620037,
+ 0x0b620046, 0x0b620044, 0x0b620042, 0x0b620040,
+ 0x0b62003e, 0x0b62003c, 0x0b62003b, 0x0b62003a,
+ 0x0a620041, 0x0a620040, 0x0a62003e, 0x0a62003c,
+ 0x0a62003b, 0x0a62003a, 0x0a620039, 0x0a620038,
+ 0x0962003e, 0x0962003d, 0x0962003c, 0x0962003b,
+ 0x09620039, 0x09620037, 0x09620035, 0x09620033,
+ 0x08620044, 0x08620042, 0x08620040, 0x0862003e,
+ 0x0862003c, 0x0862003b, 0x0862003a, 0x08620039,
+ 0x07620043, 0x07620042, 0x07620040, 0x0762003f,
+ 0x0762003d, 0x0762003b, 0x0762003a, 0x07620039,
+ 0x0662003e, 0x0662003d, 0x0662003c, 0x0662003b,
+ 0x06620039, 0x06620037, 0x06620035, 0x06620033,
+ 0x05620046, 0x05620044, 0x05620042, 0x05620040,
+ 0x0562003e, 0x0562003c, 0x0562003b, 0x05620039,
+ 0x04620044, 0x04620042, 0x04620040, 0x0462003e,
+ 0x0462003c, 0x0462003b, 0x04620039, 0x04620038,
+ 0x0362003c, 0x0362003b, 0x0362003a, 0x03620039,
+ 0x03620038, 0x03620037, 0x03620035, 0x03620033,
+ 0x0262004c, 0x0262004a, 0x02620048, 0x02620047,
+ 0x02620046, 0x02620044, 0x02620043, 0x02620042,
+ 0x0162004a, 0x01620048, 0x01620046, 0x01620044,
+ 0x01620043, 0x01620042, 0x01620041, 0x01620040,
+ 0x00620042, 0x00620040, 0x0062003e, 0x0062003c,
+ 0x0062003b, 0x00620039, 0x00620037, 0x00620035,
+};
+
+const u32 txpwrctrl_tx_gain_ipa[] = {
+ 0x5ff7002d, 0x5ff7002b, 0x5ff7002a, 0x5ff70029,
+ 0x5ff70028, 0x5ff70027, 0x5ff70026, 0x5ff70025,
+ 0x5ef7002d, 0x5ef7002b, 0x5ef7002a, 0x5ef70029,
+ 0x5ef70028, 0x5ef70027, 0x5ef70026, 0x5ef70025,
+ 0x5df7002d, 0x5df7002b, 0x5df7002a, 0x5df70029,
+ 0x5df70028, 0x5df70027, 0x5df70026, 0x5df70025,
+ 0x5cf7002d, 0x5cf7002b, 0x5cf7002a, 0x5cf70029,
+ 0x5cf70028, 0x5cf70027, 0x5cf70026, 0x5cf70025,
+ 0x5bf7002d, 0x5bf7002b, 0x5bf7002a, 0x5bf70029,
+ 0x5bf70028, 0x5bf70027, 0x5bf70026, 0x5bf70025,
+ 0x5af7002d, 0x5af7002b, 0x5af7002a, 0x5af70029,
+ 0x5af70028, 0x5af70027, 0x5af70026, 0x5af70025,
+ 0x59f7002d, 0x59f7002b, 0x59f7002a, 0x59f70029,
+ 0x59f70028, 0x59f70027, 0x59f70026, 0x59f70025,
+ 0x58f7002d, 0x58f7002b, 0x58f7002a, 0x58f70029,
+ 0x58f70028, 0x58f70027, 0x58f70026, 0x58f70025,
+ 0x57f7002d, 0x57f7002b, 0x57f7002a, 0x57f70029,
+ 0x57f70028, 0x57f70027, 0x57f70026, 0x57f70025,
+ 0x56f7002d, 0x56f7002b, 0x56f7002a, 0x56f70029,
+ 0x56f70028, 0x56f70027, 0x56f70026, 0x56f70025,
+ 0x55f7002d, 0x55f7002b, 0x55f7002a, 0x55f70029,
+ 0x55f70028, 0x55f70027, 0x55f70026, 0x55f70025,
+ 0x54f7002d, 0x54f7002b, 0x54f7002a, 0x54f70029,
+ 0x54f70028, 0x54f70027, 0x54f70026, 0x54f70025,
+ 0x53f7002d, 0x53f7002b, 0x53f7002a, 0x53f70029,
+ 0x53f70028, 0x53f70027, 0x53f70026, 0x53f70025,
+ 0x52f7002d, 0x52f7002b, 0x52f7002a, 0x52f70029,
+ 0x52f70028, 0x52f70027, 0x52f70026, 0x52f70025,
+ 0x51f7002d, 0x51f7002b, 0x51f7002a, 0x51f70029,
+ 0x51f70028, 0x51f70027, 0x51f70026, 0x51f70025,
+ 0x50f7002d, 0x50f7002b, 0x50f7002a, 0x50f70029,
+ 0x50f70028, 0x50f70027, 0x50f70026, 0x50f70025,
+};
+
+const u32 txpwrctrl_tx_gain_ipa_rev5[] = {
+ 0x1ff7002d, 0x1ff7002b, 0x1ff7002a, 0x1ff70029,
+ 0x1ff70028, 0x1ff70027, 0x1ff70026, 0x1ff70025,
+ 0x1ef7002d, 0x1ef7002b, 0x1ef7002a, 0x1ef70029,
+ 0x1ef70028, 0x1ef70027, 0x1ef70026, 0x1ef70025,
+ 0x1df7002d, 0x1df7002b, 0x1df7002a, 0x1df70029,
+ 0x1df70028, 0x1df70027, 0x1df70026, 0x1df70025,
+ 0x1cf7002d, 0x1cf7002b, 0x1cf7002a, 0x1cf70029,
+ 0x1cf70028, 0x1cf70027, 0x1cf70026, 0x1cf70025,
+ 0x1bf7002d, 0x1bf7002b, 0x1bf7002a, 0x1bf70029,
+ 0x1bf70028, 0x1bf70027, 0x1bf70026, 0x1bf70025,
+ 0x1af7002d, 0x1af7002b, 0x1af7002a, 0x1af70029,
+ 0x1af70028, 0x1af70027, 0x1af70026, 0x1af70025,
+ 0x19f7002d, 0x19f7002b, 0x19f7002a, 0x19f70029,
+ 0x19f70028, 0x19f70027, 0x19f70026, 0x19f70025,
+ 0x18f7002d, 0x18f7002b, 0x18f7002a, 0x18f70029,
+ 0x18f70028, 0x18f70027, 0x18f70026, 0x18f70025,
+ 0x17f7002d, 0x17f7002b, 0x17f7002a, 0x17f70029,
+ 0x17f70028, 0x17f70027, 0x17f70026, 0x17f70025,
+ 0x16f7002d, 0x16f7002b, 0x16f7002a, 0x16f70029,
+ 0x16f70028, 0x16f70027, 0x16f70026, 0x16f70025,
+ 0x15f7002d, 0x15f7002b, 0x15f7002a, 0x15f70029,
+ 0x15f70028, 0x15f70027, 0x15f70026, 0x15f70025,
+ 0x14f7002d, 0x14f7002b, 0x14f7002a, 0x14f70029,
+ 0x14f70028, 0x14f70027, 0x14f70026, 0x14f70025,
+ 0x13f7002d, 0x13f7002b, 0x13f7002a, 0x13f70029,
+ 0x13f70028, 0x13f70027, 0x13f70026, 0x13f70025,
+ 0x12f7002d, 0x12f7002b, 0x12f7002a, 0x12f70029,
+ 0x12f70028, 0x12f70027, 0x12f70026, 0x12f70025,
+ 0x11f7002d, 0x11f7002b, 0x11f7002a, 0x11f70029,
+ 0x11f70028, 0x11f70027, 0x11f70026, 0x11f70025,
+ 0x10f7002d, 0x10f7002b, 0x10f7002a, 0x10f70029,
+ 0x10f70028, 0x10f70027, 0x10f70026, 0x10f70025,
+};
+
+const u32 txpwrctrl_tx_gain_ipa_rev6[] = {
+ 0x0ff7002d, 0x0ff7002b, 0x0ff7002a, 0x0ff70029,
+ 0x0ff70028, 0x0ff70027, 0x0ff70026, 0x0ff70025,
+ 0x0ef7002d, 0x0ef7002b, 0x0ef7002a, 0x0ef70029,
+ 0x0ef70028, 0x0ef70027, 0x0ef70026, 0x0ef70025,
+ 0x0df7002d, 0x0df7002b, 0x0df7002a, 0x0df70029,
+ 0x0df70028, 0x0df70027, 0x0df70026, 0x0df70025,
+ 0x0cf7002d, 0x0cf7002b, 0x0cf7002a, 0x0cf70029,
+ 0x0cf70028, 0x0cf70027, 0x0cf70026, 0x0cf70025,
+ 0x0bf7002d, 0x0bf7002b, 0x0bf7002a, 0x0bf70029,
+ 0x0bf70028, 0x0bf70027, 0x0bf70026, 0x0bf70025,
+ 0x0af7002d, 0x0af7002b, 0x0af7002a, 0x0af70029,
+ 0x0af70028, 0x0af70027, 0x0af70026, 0x0af70025,
+ 0x09f7002d, 0x09f7002b, 0x09f7002a, 0x09f70029,
+ 0x09f70028, 0x09f70027, 0x09f70026, 0x09f70025,
+ 0x08f7002d, 0x08f7002b, 0x08f7002a, 0x08f70029,
+ 0x08f70028, 0x08f70027, 0x08f70026, 0x08f70025,
+ 0x07f7002d, 0x07f7002b, 0x07f7002a, 0x07f70029,
+ 0x07f70028, 0x07f70027, 0x07f70026, 0x07f70025,
+ 0x06f7002d, 0x06f7002b, 0x06f7002a, 0x06f70029,
+ 0x06f70028, 0x06f70027, 0x06f70026, 0x06f70025,
+ 0x05f7002d, 0x05f7002b, 0x05f7002a, 0x05f70029,
+ 0x05f70028, 0x05f70027, 0x05f70026, 0x05f70025,
+ 0x04f7002d, 0x04f7002b, 0x04f7002a, 0x04f70029,
+ 0x04f70028, 0x04f70027, 0x04f70026, 0x04f70025,
+ 0x03f7002d, 0x03f7002b, 0x03f7002a, 0x03f70029,
+ 0x03f70028, 0x03f70027, 0x03f70026, 0x03f70025,
+ 0x02f7002d, 0x02f7002b, 0x02f7002a, 0x02f70029,
+ 0x02f70028, 0x02f70027, 0x02f70026, 0x02f70025,
+ 0x01f7002d, 0x01f7002b, 0x01f7002a, 0x01f70029,
+ 0x01f70028, 0x01f70027, 0x01f70026, 0x01f70025,
+ 0x00f7002d, 0x00f7002b, 0x00f7002a, 0x00f70029,
+ 0x00f70028, 0x00f70027, 0x00f70026, 0x00f70025,
+};
+
+const u32 txpwrctrl_tx_gain_ipa_5g[] = {
+ 0x7ff70035, 0x7ff70033, 0x7ff70032, 0x7ff70031,
+ 0x7ff7002f, 0x7ff7002e, 0x7ff7002d, 0x7ff7002b,
+ 0x7ff7002a, 0x7ff70029, 0x7ff70028, 0x7ff70027,
+ 0x7ff70026, 0x7ff70024, 0x7ff70023, 0x7ff70022,
+ 0x7ef70028, 0x7ef70027, 0x7ef70026, 0x7ef70025,
+ 0x7ef70024, 0x7ef70023, 0x7df70028, 0x7df70027,
+ 0x7df70026, 0x7df70025, 0x7df70024, 0x7df70023,
+ 0x7df70022, 0x7cf70029, 0x7cf70028, 0x7cf70027,
+ 0x7cf70026, 0x7cf70025, 0x7cf70023, 0x7cf70022,
+ 0x7bf70029, 0x7bf70028, 0x7bf70026, 0x7bf70025,
+ 0x7bf70024, 0x7bf70023, 0x7bf70022, 0x7bf70021,
+ 0x7af70029, 0x7af70028, 0x7af70027, 0x7af70026,
+ 0x7af70025, 0x7af70024, 0x7af70023, 0x7af70022,
+ 0x79f70029, 0x79f70028, 0x79f70027, 0x79f70026,
+ 0x79f70025, 0x79f70024, 0x79f70023, 0x79f70022,
+ 0x78f70029, 0x78f70028, 0x78f70027, 0x78f70026,
+ 0x78f70025, 0x78f70024, 0x78f70023, 0x78f70022,
+ 0x77f70029, 0x77f70028, 0x77f70027, 0x77f70026,
+ 0x77f70025, 0x77f70024, 0x77f70023, 0x77f70022,
+ 0x76f70029, 0x76f70028, 0x76f70027, 0x76f70026,
+ 0x76f70024, 0x76f70023, 0x76f70022, 0x76f70021,
+ 0x75f70029, 0x75f70028, 0x75f70027, 0x75f70026,
+ 0x75f70025, 0x75f70024, 0x75f70023, 0x74f70029,
+ 0x74f70028, 0x74f70026, 0x74f70025, 0x74f70024,
+ 0x74f70023, 0x74f70022, 0x73f70029, 0x73f70027,
+ 0x73f70026, 0x73f70025, 0x73f70024, 0x73f70023,
+ 0x73f70022, 0x72f70028, 0x72f70027, 0x72f70026,
+ 0x72f70025, 0x72f70024, 0x72f70023, 0x72f70022,
+ 0x71f70028, 0x71f70027, 0x71f70026, 0x71f70025,
+ 0x71f70024, 0x71f70023, 0x70f70028, 0x70f70027,
+ 0x70f70026, 0x70f70024, 0x70f70023, 0x70f70022,
+ 0x70f70021, 0x70f70020, 0x70f70020, 0x70f7001f,
+};
+
+const u16 tbl_iqcal_gainparams[2][9][8] = {
+ {
+ { 0x000, 0, 0, 2, 0x69, 0x69, 0x69, 0x69 },
+ { 0x700, 7, 0, 0, 0x69, 0x69, 0x69, 0x69 },
+ { 0x710, 7, 1, 0, 0x68, 0x68, 0x68, 0x68 },
+ { 0x720, 7, 2, 0, 0x67, 0x67, 0x67, 0x67 },
+ { 0x730, 7, 3, 0, 0x66, 0x66, 0x66, 0x66 },
+ { 0x740, 7, 4, 0, 0x65, 0x65, 0x65, 0x65 },
+ { 0x741, 7, 4, 1, 0x65, 0x65, 0x65, 0x65 },
+ { 0x742, 7, 4, 2, 0x65, 0x65, 0x65, 0x65 },
+ { 0x743, 7, 4, 3, 0x65, 0x65, 0x65, 0x65 }
+ },
+ {
+ { 0x000, 7, 0, 0, 0x79, 0x79, 0x79, 0x79 },
+ { 0x700, 7, 0, 0, 0x79, 0x79, 0x79, 0x79 },
+ { 0x710, 7, 1, 0, 0x79, 0x79, 0x79, 0x79 },
+ { 0x720, 7, 2, 0, 0x78, 0x78, 0x78, 0x78 },
+ { 0x730, 7, 3, 0, 0x78, 0x78, 0x78, 0x78 },
+ { 0x740, 7, 4, 0, 0x78, 0x78, 0x78, 0x78 },
+ { 0x741, 7, 4, 1, 0x78, 0x78, 0x78, 0x78 },
+ { 0x742, 7, 4, 2, 0x78, 0x78, 0x78, 0x78 },
+ { 0x743, 7, 4, 3, 0x78, 0x78, 0x78, 0x78 }
+ }
+};
+
+const struct nphy_txiqcal_ladder ladder_lo[] = {
+ { 3, 0 },
+ { 4, 0 },
+ { 6, 0 },
+ { 9, 0 },
+ { 13, 0 },
+ { 18, 0 },
+ { 25, 0 },
+ { 25, 1 },
+ { 25, 2 },
+ { 25, 3 },
+ { 25, 4 },
+ { 25, 5 },
+ { 25, 6 },
+ { 25, 7 },
+ { 35, 7 },
+ { 50, 7 },
+ { 71, 7 },
+ { 100, 7 }
+};
+
+const struct nphy_txiqcal_ladder ladder_iq[] = {
+ { 3, 0 },
+ { 4, 0 },
+ { 6, 0 },
+ { 9, 0 },
+ { 13, 0 },
+ { 18, 0 },
+ { 25, 0 },
+ { 35, 0 },
+ { 50, 0 },
+ { 71, 0 },
+ { 100, 0 },
+ { 100, 1 },
+ { 100, 2 },
+ { 100, 3 },
+ { 100, 4 },
+ { 100, 5 },
+ { 100, 6 },
+ { 100, 7 }
+};
+
+const u16 loscale[] = {
+ 256, 256, 271, 271,
+ 287, 256, 256, 271,
+ 271, 287, 287, 304,
+ 304, 256, 256, 271,
+ 271, 287, 287, 304,
+ 304, 322, 322, 341,
+ 341, 362, 362, 383,
+ 383, 256, 256, 271,
+ 271, 287, 287, 304,
+ 304, 322, 322, 256,
+ 256, 271, 271, 287,
+ 287, 304, 304, 322,
+ 322, 341, 341, 362,
+ 362, 256, 256, 271,
+ 271, 287, 287, 304,
+ 304, 322, 322, 256,
+ 256, 271, 271, 287,
+ 287, 304, 304, 322,
+ 322, 341, 341, 362,
+ 362, 256, 256, 271,
+ 271, 287, 287, 304,
+ 304, 322, 322, 341,
+ 341, 362, 362, 383,
+ 383, 406, 406, 430,
+ 430, 455, 455, 482,
+ 482, 511, 511, 541,
+ 541, 573, 573, 607,
+ 607, 643, 643, 681,
+ 681, 722, 722, 764,
+ 764, 810, 810, 858,
+ 858, 908, 908, 962,
+ 962, 1019, 1019, 256
+};
+
+const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
+ 0x0200, 0x0300, 0x0400, 0x0700,
+ 0x0900, 0x0c00, 0x1200, 0x1201,
+ 0x1202, 0x1203, 0x1204, 0x1205,
+ 0x1206, 0x1207, 0x1907, 0x2307,
+ 0x3207, 0x4707
+};
+
+const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
+ 0x0300, 0x0500, 0x0700, 0x0900,
+ 0x0d00, 0x1100, 0x1900, 0x1901,
+ 0x1902, 0x1903, 0x1904, 0x1905,
+ 0x1906, 0x1907, 0x2407, 0x3207,
+ 0x4607, 0x6407
+};
+
+const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
+ 0x0100, 0x0200, 0x0400, 0x0700,
+ 0x0900, 0x0c00, 0x1200, 0x1900,
+ 0x2300, 0x3200, 0x4700, 0x4701,
+ 0x4702, 0x4703, 0x4704, 0x4705,
+ 0x4706, 0x4707
+};
+
+const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
+ 0x0200, 0x0300, 0x0600, 0x0900,
+ 0x0d00, 0x1100, 0x1900, 0x2400,
+ 0x3200, 0x4600, 0x6400, 0x6401,
+ 0x6402, 0x6403, 0x6404, 0x6405,
+ 0x6406, 0x6407
+};
+
+const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3] = { };
+
+const u16 tbl_tx_iqlo_cal_startcoefs[B43_NTAB_TX_IQLO_CAL_STARTCOEFS] = { };
+
+const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
+ 0x8423, 0x8323, 0x8073, 0x8256,
+ 0x8045, 0x8223, 0x9423, 0x9323,
+ 0x9073, 0x9256, 0x9045, 0x9223
+};
+
+const u16 tbl_tx_iqlo_cal_cmds_recal[] = {
+ 0x8101, 0x8253, 0x8053, 0x8234,
+ 0x8034, 0x9101, 0x9253, 0x9053,
+ 0x9234, 0x9034
+};
+
+const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
+ 0x8123, 0x8264, 0x8086, 0x8245,
+ 0x8056, 0x9123, 0x9264, 0x9086,
+ 0x9245, 0x9056
+};
+
+const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
+ 0x8434, 0x8334, 0x8084, 0x8267,
+ 0x8056, 0x8234, 0x9434, 0x9334,
+ 0x9084, 0x9267, 0x9056, 0x9234
+};
+
+const s16 tbl_tx_filter_coef_rev4[7][15] = {
+ { -377, 137, -407, 208, -1527,
+ 956, 93, 186, 93, 230,
+ -44, 230, 20, -191, 201 },
+ { -77, 20, -98, 49, -93,
+ 60, 56, 111, 56, 26,
+ -5, 26, 34, -32, 34 },
+ { -360, 164, -376, 164, -1533,
+ 576, 308, -314, 308, 121,
+ -73, 121, 91, 124, 91 },
+ { -295, 200, -363, 142, -1391,
+ 826, 151, 301, 151, 151,
+ 301, 151, 602, -752, 602 },
+ { -92, 58, -96, 49, -104,
+ 44, 17, 35, 17, 12,
+ 25, 12, 13, 27, 13 },
+ { -375, 136, -399, 209, -1479,
+ 949, 130, 260, 130, 230,
+ -44, 230, 201, -191, 201 },
+ { 0xed9, 0xc8, 0xe95, 0x8e, 0xa91,
+ 0x33a, 0x97, 0x12d, 0x97, 0x97,
+ 0x12d, 0x97, 0x25a, 0xd10, 0x25a }
+};
+
+/* addr0, addr1, bmask, shift */
+const struct nphy_rf_control_override_rev2 tbl_rf_control_override_rev2[] = {
+ { 0x78, 0x78, 0x0038, 3 }, /* for field == 0x0002 (fls == 2) */
+ { 0x7A, 0x7D, 0x0001, 0 }, /* for field == 0x0004 (fls == 3) */
+ { 0x7A, 0x7D, 0x0002, 1 }, /* for field == 0x0008 (fls == 4) */
+ { 0x7A, 0x7D, 0x0004, 2 }, /* for field == 0x0010 (fls == 5) */
+ { 0x7A, 0x7D, 0x0030, 4 }, /* for field == 0x0020 (fls == 6) */
+ { 0x7A, 0x7D, 0x00C0, 6 }, /* for field == 0x0040 (fls == 7) */
+ { 0x7A, 0x7D, 0x0100, 8 }, /* for field == 0x0080 (fls == 8) */
+ { 0x7A, 0x7D, 0x0200, 9 }, /* for field == 0x0100 (fls == 9) */
+ { 0x78, 0x78, 0x0004, 2 }, /* for field == 0x0200 (fls == 10) */
+ { 0x7B, 0x7E, 0x01FF, 0 }, /* for field == 0x0400 (fls == 11) */
+ { 0x7C, 0x7F, 0x01FF, 0 }, /* for field == 0x0800 (fls == 12) */
+ { 0x78, 0x78, 0x0100, 8 }, /* for field == 0x1000 (fls == 13) */
+ { 0x78, 0x78, 0x0200, 9 }, /* for field == 0x2000 (fls == 14) */
+ { 0x78, 0x78, 0xF000, 12 } /* for field == 0x4000 (fls == 15) */
+};
+
+/* val_mask, val_shift, en_addr0, val_addr0, en_addr1, val_addr1 */
+const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = {
+ { 0x8000, 15, 0xE5, 0xF9, 0xE6, 0xFB }, /* field == 0x0001 (fls 1) */
+ { 0x0001, 0, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0002 (fls 2) */
+ { 0x0002, 1, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0004 (fls 3) */
+ { 0x0004, 2, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0008 (fls 4) */
+ { 0x0016, 4, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0010 (fls 5) */
+ { 0x0020, 5, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0020 (fls 6) */
+ { 0x0040, 6, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0040 (fls 7) */
+ { 0x0080, 6, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0080 (fls 8) */
+ { 0x0100, 7, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0100 (fls 9) */
+ { 0x0007, 0, 0xE7, 0xF8, 0xEC, 0xFA }, /* field == 0x0200 (fls 10) */
+ { 0x0070, 4, 0xE7, 0xF8, 0xEC, 0xFA }, /* field == 0x0400 (fls 11) */
+ { 0xE000, 13, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0800 (fls 12) */
+ { 0xFFFF, 0, 0xE7, 0x7B, 0xEC, 0x7E }, /* field == 0x1000 (fls 13) */
+ { 0xFFFF, 0, 0xE7, 0x7C, 0xEC, 0x7F }, /* field == 0x2000 (fls 14) */
+ { 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */
+};
+
static inline void assert_ntab_array_sizes(void)
{
#undef check
@@ -2442,6 +2980,72 @@ static inline void assert_ntab_array_sizes(void)
#undef check
}
+u32 b43_ntab_read(struct b43_wldev *dev, u32 offset)
+{
+ u32 type, value;
+
+ type = offset & B43_NTAB_TYPEMASK;
+ offset &= ~B43_NTAB_TYPEMASK;
+ B43_WARN_ON(offset > 0xFFFF);
+
+ switch (type) {
+ case B43_NTAB_8BIT:
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
+ value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF;
+ break;
+ case B43_NTAB_16BIT:
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
+ value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ break;
+ case B43_NTAB_32BIT:
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
+ value = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
+ value <<= 16;
+ value |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ break;
+ default:
+ B43_WARN_ON(1);
+ value = 0;
+ }
+
+ return value;
+}
+
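The offset passed to b43_ntab_read() encodes both the element address and the access width: the width selector lives in the bits covered by B43_NTAB_TYPEMASK and is stripped before the address is written to B43_NPHY_TABLE_ADDR, and the remaining address must fit in 16 bits (hence the B43_WARN_ON). A standalone model of that encoding; the mask value and the NTAB16() bit layout below are assumptions for illustration, and only the mask-and-strip step mirrors the code above:

    #include <stdint.h>
    #include <assert.h>

    #define NTAB_TYPEMASK 0xF0000000u /* assumed value, for illustration only */
    #define NTAB_16BIT    0x20000000u /* assumed value, for illustration only */
    /* assumed layout: table id in bits 10 and up, element index in bits 0-9 */
    #define NTAB16(table, offset) (((table) << 10) | (offset) | NTAB_16BIT)

    int main(void)
    {
            uint32_t off = NTAB16(0x1B, 0x1C0);  /* cf. B43_NTAB_C1_LOFEEDTH */
            uint32_t type = off & NTAB_TYPEMASK; /* access-width selector */

            off &= ~NTAB_TYPEMASK; /* address actually written to the PHY */
            assert(type == NTAB_16BIT);
            assert(off <= 0xFFFF); /* matches the B43_WARN_ON() above */
            return 0;
    }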
+void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, void *_data)
+{
+ u32 type;
+ u8 *data = _data;
+ unsigned int i;
+
+ type = offset & B43_NTAB_TYPEMASK;
+ offset &= ~B43_NTAB_TYPEMASK;
+ B43_WARN_ON(offset > 0xFFFF);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
+
+ for (i = 0; i < nr_elements; i++) {
+ switch (type) {
+ case B43_NTAB_8BIT:
+ *data = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF;
+ data++;
+ break;
+ case B43_NTAB_16BIT:
+ *((u16 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ data += 2;
+ break;
+ case B43_NTAB_32BIT:
+ *((u32 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
+ *((u32 *)data) <<= 16;
+ *((u32 *)data) |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ data += 4;
+ break;
+ default:
+ B43_WARN_ON(1);
+ }
+ }
+}
+
void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value)
{
u32 type;
@@ -2474,3 +3078,91 @@ void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value)
/* Some compiletime assertions... */
assert_ntab_array_sizes();
}
+
+void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, const void *_data)
+{
+ u32 type, value;
+ const u8 *data = _data;
+ unsigned int i;
+
+ type = offset & B43_NTAB_TYPEMASK;
+ offset &= ~B43_NTAB_TYPEMASK;
+ B43_WARN_ON(offset > 0xFFFF);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
+
+ for (i = 0; i < nr_elements; i++) {
+ switch (type) {
+ case B43_NTAB_8BIT:
+ value = *data;
+ data++;
+ B43_WARN_ON(value & ~0xFF);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value);
+ break;
+ case B43_NTAB_16BIT:
+ value = *((u16 *)data);
+ data += 2;
+ B43_WARN_ON(value & ~0xFFFF);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value);
+ break;
+ case B43_NTAB_32BIT:
+ value = *((u32 *)data);
+ data += 4;
+ b43_phy_write(dev, B43_NPHY_TABLE_DATAHI, value >> 16);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ value & 0xFFFF);
+ break;
+ default:
+ B43_WARN_ON(1);
+ }
+ }
+}
+
+#define ntab_upload(dev, offset, data) do { \
+ unsigned int i; \
+ for (i = 0; i < (offset##_SIZE); i++) \
+ b43_ntab_write(dev, (offset) + i, (data)[i]); \
+ } while (0)
+
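For reference, a call such as ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot) simply pastes the _SIZE suffix onto the offset macro and loops over the table, i.e. it expands to roughly:

    {
            unsigned int i;

            for (i = 0; i < B43_NTAB_PILOT_SIZE; i++)
                    b43_ntab_write(dev, B43_NTAB_PILOT + i, b43_ntab_pilot[i]);
    }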
+void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
+{
+ /* Static tables */
+ ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct);
+ ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup);
+ ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap);
+ ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
+ ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
+ ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
+ ntab_upload(dev, B43_NTAB_PILOTLT, b43_ntab_pilotlt);
+ ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
+ ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
+ ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
+ ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
+ ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
+ ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
+ ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
+
+ /* Volatile tables */
+ ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
+ ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
+ ntab_upload(dev, B43_NTAB_C0_ESTPLT, b43_ntab_estimatepowerlt0);
+ ntab_upload(dev, B43_NTAB_C1_ESTPLT, b43_ntab_estimatepowerlt1);
+ ntab_upload(dev, B43_NTAB_C0_ADJPLT, b43_ntab_adjustpower0);
+ ntab_upload(dev, B43_NTAB_C1_ADJPLT, b43_ntab_adjustpower1);
+ ntab_upload(dev, B43_NTAB_C0_GAINCTL, b43_ntab_gainctl0);
+ ntab_upload(dev, B43_NTAB_C1_GAINCTL, b43_ntab_gainctl1);
+ ntab_upload(dev, B43_NTAB_C0_IQLT, b43_ntab_iqlt0);
+ ntab_upload(dev, B43_NTAB_C1_IQLT, b43_ntab_iqlt1);
+ ntab_upload(dev, B43_NTAB_C0_LOFEEDTH, b43_ntab_loftlt0);
+ ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
+}
+
+void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
+{
+ /* Static tables */
+ /* TODO */
+
+ /* Volatile tables */
+ /* TODO */
+}
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 4d498b053ec7..9c1c6ecd3672 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -46,6 +46,27 @@ struct b43_nphy_channeltab_entry {
struct b43_wldev;
+struct nphy_txiqcal_ladder {
+ u8 percent;
+ u8 g_env;
+};
+
+struct nphy_rf_control_override_rev2 {
+ u8 addr0;
+ u8 addr1;
+ u16 bmask;
+ u8 shift;
+};
+
+struct nphy_rf_control_override_rev3 {
+ u16 val_mask;
+ u8 val_shift;
+ u8 en_addr0;
+ u8 val_addr0;
+ u8 en_addr1;
+ u8 val_addr1;
+};
+
/* Upload the default register value table.
* If "ghz5" is true, we upload the 5Ghz table. Otherwise the 2.4Ghz
* table is uploaded. If "ignore_uploadflag" is true, we upload any value
@@ -126,34 +147,57 @@ b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel);
#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
#define B43_NTAB_C1_LOFEEDTH_SIZE 128
+#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE 18
+#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE 18
+#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_40_SIZE 18
+#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_20_SIZE 18
+#define B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3 11
+#define B43_NTAB_TX_IQLO_CAL_STARTCOEFS 9
+#define B43_NTAB_TX_IQLO_CAL_CMDS_RECAL_REV3 12
+#define B43_NTAB_TX_IQLO_CAL_CMDS_RECAL 10
+#define B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL 10
+#define B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL_REV3 12
+
+u32 b43_ntab_read(struct b43_wldev *dev, u32 offset);
+void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, void *_data);
void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value);
-
-extern const u8 b43_ntab_adjustpower0[];
-extern const u8 b43_ntab_adjustpower1[];
-extern const u16 b43_ntab_bdi[];
-extern const u32 b43_ntab_channelest[];
-extern const u8 b43_ntab_estimatepowerlt0[];
-extern const u8 b43_ntab_estimatepowerlt1[];
-extern const u8 b43_ntab_framelookup[];
-extern const u32 b43_ntab_framestruct[];
-extern const u32 b43_ntab_gainctl0[];
-extern const u32 b43_ntab_gainctl1[];
-extern const u32 b43_ntab_intlevel[];
-extern const u32 b43_ntab_iqlt0[];
-extern const u32 b43_ntab_iqlt1[];
-extern const u16 b43_ntab_loftlt0[];
-extern const u16 b43_ntab_loftlt1[];
-extern const u8 b43_ntab_mcs[];
-extern const u32 b43_ntab_noisevar10[];
-extern const u32 b43_ntab_noisevar11[];
-extern const u16 b43_ntab_pilot[];
-extern const u32 b43_ntab_pilotlt[];
-extern const u32 b43_ntab_tdi20a0[];
-extern const u32 b43_ntab_tdi20a1[];
-extern const u32 b43_ntab_tdi40a0[];
-extern const u32 b43_ntab_tdi40a1[];
-extern const u32 b43_ntab_tdtrn[];
-extern const u32 b43_ntab_tmap[];
-
+void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, const void *_data);
+
+void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev);
+void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev);
+
+extern const u32 b43_ntab_tx_gain_rev0_1_2[];
+extern const u32 b43_ntab_tx_gain_rev3plus_2ghz[];
+extern const u32 b43_ntab_tx_gain_rev3_5ghz[];
+extern const u32 b43_ntab_tx_gain_rev4_5ghz[];
+extern const u32 b43_ntab_tx_gain_rev5plus_5ghz[];
+
+extern const u32 txpwrctrl_tx_gain_ipa[];
+extern const u32 txpwrctrl_tx_gain_ipa_rev5[];
+extern const u32 txpwrctrl_tx_gain_ipa_rev6[];
+extern const u32 txpwrctrl_tx_gain_ipa_5g[];
+extern const u16 tbl_iqcal_gainparams[2][9][8];
+extern const struct nphy_txiqcal_ladder ladder_lo[];
+extern const struct nphy_txiqcal_ladder ladder_iq[];
+extern const u16 loscale[];
+
+extern const u16 tbl_tx_iqlo_cal_loft_ladder_40[];
+extern const u16 tbl_tx_iqlo_cal_loft_ladder_20[];
+extern const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[];
+extern const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[];
+extern const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[];
+extern const u16 tbl_tx_iqlo_cal_startcoefs[];
+extern const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[];
+extern const u16 tbl_tx_iqlo_cal_cmds_recal[];
+extern const u16 tbl_tx_iqlo_cal_cmds_fullcal[];
+extern const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[];
+extern const s16 tbl_tx_filter_coef_rev4[7][15];
+
+extern const struct nphy_rf_control_override_rev2
+ tbl_rf_control_override_rev2[];
+extern const struct nphy_rf_control_override_rev3
+ tbl_rf_control_override_rev3[];
#endif /* B43_TABLES_NPHY_H_ */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index fbae264095cc..874a64a6c610 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -61,6 +61,8 @@ MODULE_AUTHOR("Michael Buesch");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(B43legacy_SUPPORTED_FIRMWARE_ID);
+MODULE_FIRMWARE("b43legacy/ucode2.fw");
+MODULE_FIRMWARE("b43legacy/ucode4.fw");
#if defined(CONFIG_B43LEGACY_DMA) && defined(CONFIG_B43LEGACY_PIO)
static int modparam_pio;
@@ -3960,7 +3962,7 @@ static struct ssb_driver b43legacy_ssb_driver = {
static void b43legacy_print_driverinfo(void)
{
- const char *feat_pci = "", *feat_leds = "", *feat_rfkill = "",
+ const char *feat_pci = "", *feat_leds = "",
*feat_pio = "", *feat_dma = "";
#ifdef CONFIG_B43LEGACY_PCI_AUTOSELECT
@@ -3969,9 +3971,6 @@ static void b43legacy_print_driverinfo(void)
#ifdef CONFIG_B43LEGACY_LEDS
feat_leds = "L";
#endif
-#ifdef CONFIG_B43LEGACY_RFKILL
- feat_rfkill = "R";
-#endif
#ifdef CONFIG_B43LEGACY_PIO
feat_pio = "I";
#endif
@@ -3979,9 +3978,9 @@ static void b43legacy_print_driverinfo(void)
feat_dma = "D";
#endif
printk(KERN_INFO "Broadcom 43xx-legacy driver loaded "
- "[ Features: %s%s%s%s%s, Firmware-ID: "
+ "[ Features: %s%s%s%s, Firmware-ID: "
B43legacy_SUPPORTED_FIRMWARE_ID " ]\n",
- feat_pci, feat_leds, feat_rfkill, feat_pio, feat_dma);
+ feat_pci, feat_leds, feat_pio, feat_dma);
}
static int __init b43legacy_init(void)
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index 8fdd41f4b4f2..4d97ae37499b 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -39,7 +39,7 @@ struct hostap_pci_priv {
/* FIX: do we need mb/wmb/rmb with memory operations? */
-static struct pci_device_id prism2_pci_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(prism2_pci_id_table) = {
/* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */
{ 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID },
/* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */
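This and the following hunks switch open-coded struct pci_device_id arrays over to DEFINE_PCI_DEVICE_TABLE(). As a rough sketch of what that buys (the macro body below is quoted from memory of the <linux/pci.h> of that era and is an assumption, not part of this patch), the macro marks the table const and places it in the __devinitconst section:

    #define DEFINE_PCI_DEVICE_TABLE(_table) \
            const struct pci_device_id _table[] __devinitconst

    /* so the hostap declaration above effectively becomes: */
    const struct pci_device_id prism2_pci_id_table[] __devinitconst = {
            { 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID },
            /* ... remaining entries unchanged ... */
    };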
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index 0e5d51086a44..fc04ccdc5bef 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -60,7 +60,7 @@ struct hostap_plx_priv {
#define PLXDEV(vendor,dev,str) { vendor, dev, PCI_ANY_ID, PCI_ANY_ID }
-static struct pci_device_id prism2_plx_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(prism2_plx_id_table) = {
PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"),
PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"),
PLXDEV(0x126c, 0x8030, "Nortel emobility"),
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 56afcf041f81..9b72c45a7748 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -6585,7 +6585,7 @@ static void ipw2100_shutdown(struct pci_dev *pci_dev)
#define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x }
-static struct pci_device_id ipw2100_pci_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ipw2100_pci_id_table) = {
IPW2100_DEV_ID(0x2520), /* IN 2100A mPCI 3A */
IPW2100_DEV_ID(0x2521), /* IN 2100A mPCI 3B */
IPW2100_DEV_ID(0x2524), /* IN 2100A mPCI 3B */
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 09ddd3e6bedc..63c2a7ade5fb 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11524,7 +11524,7 @@ out:
}
/* PCI driver stuff */
-static struct pci_device_id card_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index b16b06c2031f..dc8ed1527666 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,14 +1,8 @@
config IWLWIFI
tristate "Intel Wireless Wifi"
- depends on PCI && MAC80211 && EXPERIMENTAL
+ depends on PCI && MAC80211
select FW_LOADER
-config IWLWIFI_SPECTRUM_MEASUREMENT
- bool "Enable Spectrum Measurement in iwlagn driver"
- depends on IWLWIFI
- ---help---
- This option will enable spectrum measurement for the iwlagn driver.
-
config IWLWIFI_DEBUG
bool "Enable full debugging output in iwlagn and iwl3945 drivers"
depends on IWLWIFI
@@ -120,9 +114,3 @@ config IWL3945
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/kbuild/modules.txt>. The
module will be called iwl3945.
-
-config IWL3945_SPECTRUM_MEASUREMENT
- bool "Enable Spectrum Measurement in iwl3945 driver"
- depends on IWL3945
- ---help---
- This option will enable spectrum measurement for the iwl3945 driver.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 7f82044af242..4e378faee650 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -3,7 +3,6 @@ iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o iwl-calib.o
iwlcore-objs += iwl-scan.o iwl-led.o
iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
-iwlcore-$(CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT) += iwl-spectrum.o
iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
CFLAGS_iwl-devtrace.o := -I$(src)
@@ -20,3 +19,5 @@ iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
# 3945
obj-$(CONFIG_IWL3945) += iwl3945.o
iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 0db1fda94a65..9d1820676f30 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008-2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -89,8 +89,78 @@ static void iwl1000_nic_config(struct iwl_priv *priv)
~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
}
+static struct iwl_sensitivity_ranges iwl1000_sensitivity = {
+ .min_nrg_cck = 95,
+ .max_nrg_cck = 0, /* not used, set to 0 */
+ .auto_corr_min_ofdm = 90,
+ .auto_corr_min_ofdm_mrc = 170,
+ .auto_corr_min_ofdm_x1 = 120,
+ .auto_corr_min_ofdm_mrc_x1 = 240,
+
+ .auto_corr_max_ofdm = 120,
+ .auto_corr_max_ofdm_mrc = 210,
+ .auto_corr_max_ofdm_x1 = 155,
+ .auto_corr_max_ofdm_mrc_x1 = 290,
+
+ .auto_corr_min_cck = 125,
+ .auto_corr_max_cck = 200,
+ .auto_corr_min_cck_mrc = 170,
+ .auto_corr_max_cck_mrc = 400,
+ .nrg_th_cck = 95,
+ .nrg_th_ofdm = 95,
+
+ .barker_corr_th_min = 190,
+ .barker_corr_th_min_mrc = 390,
+ .nrg_th_cca = 62,
+};
+
+static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
+{
+ if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+ priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
+ priv->cfg->num_of_queues =
+ priv->cfg->mod_params->num_of_queues;
+
+ priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
+ priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
+ priv->hw_params.scd_bc_tbls_size =
+ priv->cfg->num_of_queues *
+ sizeof(struct iwl5000_scd_bc_tbl);
+ priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
+ priv->hw_params.max_stations = IWL5000_STATION_COUNT;
+ priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
+
+ priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
+ priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
+
+ priv->hw_params.max_bsm_size = 0;
+ priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ BIT(IEEE80211_BAND_5GHZ);
+ priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
+
+ priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+ priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
+ priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+
+ if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
+ priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
+
+ /* Set initial sensitivity parameters */
+ /* Set initial calibration set */
+ priv->hw_params.sens = &iwl1000_sensitivity;
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_TX_IQ_PERD) |
+ BIT(IWL_CALIB_BASE_BAND);
+
+ return 0;
+}
+
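iwl1000_hw_set_hw_params() requests the initial calibration set as a bitmask of BIT(IWL_CALIB_*) flags that the core walks later. A standalone model of composing and testing such a mask (the enum values are placeholders; only the BIT() composition mirrors the assignment above):

    #include <stdio.h>

    #define BIT(n) (1u << (n))

    /* placeholder enum; the real IWL_CALIB_* values live elsewhere */
    enum { CALIB_XTAL, CALIB_LO, CALIB_TX_IQ, CALIB_TX_IQ_PERD,
           CALIB_BASE_BAND, CALIB_MAX };

    int main(void)
    {
            unsigned int calib_init_cfg = BIT(CALIB_XTAL) |
                                          BIT(CALIB_LO) |
                                          BIT(CALIB_TX_IQ) |
                                          BIT(CALIB_TX_IQ_PERD) |
                                          BIT(CALIB_BASE_BAND);
            int i;

            for (i = 0; i < CALIB_MAX; i++)
                    if (calib_init_cfg & BIT(i))
                            printf("calibration %d requested at init\n", i);
            return 0;
    }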
static struct iwl_lib_ops iwl1000_lib = {
- .set_hw_params = iwl5000_hw_set_hw_params,
+ .set_hw_params = iwl1000_hw_set_hw_params,
.txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
.txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
.txq_set_sched = iwl5000_txq_set_sched,
@@ -106,6 +176,7 @@ static struct iwl_lib_ops iwl1000_lib = {
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
+ .dump_fh = iwl_dump_fh,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
.send_tx_power = iwl5000_send_tx_power,
@@ -139,6 +210,7 @@ static struct iwl_lib_ops iwl1000_lib = {
.temperature = iwl5000_temperature,
.set_ct_kill = iwl1000_set_ct_threshold,
},
+ .add_bcast_station = iwl_add_bcast_station,
};
static const struct iwl_ops iwl1000_ops = {
@@ -174,6 +246,7 @@ struct iwl_cfg iwl1000_bgn_cfg = {
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
};
struct iwl_cfg iwl1000_bg_cfg = {
@@ -200,6 +273,7 @@ struct iwl_cfg iwl1000_bg_cfg = {
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
};
MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
index 08ce259a0e60..042f6bc0df13 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 6fd10d443ba3..3a876a8ece38 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
index a871d09d598f..abe2b739c4dc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
index 5a1033ca7aaa..ce990adc51e7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index d4b49883b30e..47909f94271e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 28ffe4c826d8..6940f086823c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -1951,11 +1951,7 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
}
/* Add the broadcast address so we can send broadcast frames */
- if (iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL) ==
- IWL_INVALID_STATION) {
- IWL_ERR(priv, "Error adding BROADCAST address for transmit.\n");
- return -EIO;
- }
+ priv->cfg->ops->lib->add_bcast_station(priv);
/* If we have set the ASSOC_MSK and we are in BSS mode then
* add the IWL_AP_ID to the station rate table */
@@ -2796,6 +2792,7 @@ static struct iwl_lib_ops iwl3945_lib = {
.post_associate = iwl3945_post_associate,
.isr = iwl_isr_legacy,
.config_ap = iwl3945_config_ap,
+ .add_bcast_station = iwl3945_add_bcast_station,
};
static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
@@ -2830,6 +2827,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
.ht_greenfield_support = false,
.led_compensation = 64,
.broken_powersave = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
};
static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2847,9 +2845,10 @@ static struct iwl_cfg iwl3945_abg_cfg = {
.ht_greenfield_support = false,
.led_compensation = 64,
.broken_powersave = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
};
-struct pci_device_id iwl3945_hw_card_ids[] = {
+DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
{IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
{IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
{IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 3ec2fe370b58..8f553f36d270 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -37,7 +37,7 @@
#include <net/ieee80211_radiotap.h>
/* Hardware specific file defines the PCI IDs table for that hardware module */
-extern struct pci_device_id iwl3945_hw_card_ids[];
+extern const struct pci_device_id iwl3945_hw_card_ids[];
#include "iwl-csr.h"
#include "iwl-prph.h"
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index c606366b582c..67ef562e8db1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 78706ce8b7ae..aebe8c51d3e1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -1961,7 +1961,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
struct ieee80211_tx_info *info;
struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
u32 status = le32_to_cpu(tx_resp->u.status);
- int tid = MAX_TID_COUNT;
+ int uninitialized_var(tid);
int sta_id;
int freed;
u8 *qc = NULL;
@@ -2206,6 +2206,7 @@ static struct iwl_lib_ops iwl4965_lib = {
.temperature = iwl4965_temperature_calib,
.set_ct_kill = iwl4965_set_ct_threshold,
},
+ .add_bcast_station = iwl_add_bcast_station,
};
static const struct iwl_ops iwl4965_ops = {
@@ -2239,6 +2240,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
.broken_powersave = true,
.led_compensation = 61,
.chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
};
/* Module firmware */
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index bc056e9ab85f..714e032f6217 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index ec6b27689fa8..6d5988901341 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -263,8 +263,8 @@ static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
.auto_corr_max_ofdm = 120,
.auto_corr_max_ofdm_mrc = 210,
- .auto_corr_max_ofdm_x1 = 155,
- .auto_corr_max_ofdm_mrc_x1 = 290,
+ .auto_corr_max_ofdm_x1 = 120,
+ .auto_corr_max_ofdm_mrc_x1 = 240,
.auto_corr_min_cck = 125,
.auto_corr_max_cck = 200,
@@ -412,12 +412,14 @@ static void iwl5000_rx_calib_complete(struct iwl_priv *priv,
/*
* ucode
*/
-static int iwl5000_load_section(struct iwl_priv *priv,
- struct fw_desc *image,
- u32 dst_addr)
+static int iwl5000_load_section(struct iwl_priv *priv, const char *name,
+ struct fw_desc *image, u32 dst_addr)
{
dma_addr_t phy_addr = image->p_addr;
u32 byte_cnt = image->len;
+ int ret;
+
+ priv->ucode_write_complete = 0;
iwl_write_direct32(priv,
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
@@ -447,57 +449,36 @@ static int iwl5000_load_section(struct iwl_priv *priv,
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
- return 0;
-}
-
-static int iwl5000_load_given_ucode(struct iwl_priv *priv,
- struct fw_desc *inst_image,
- struct fw_desc *data_image)
-{
- int ret = 0;
-
- ret = iwl5000_load_section(priv, inst_image,
- IWL50_RTC_INST_LOWER_BOUND);
- if (ret)
- return ret;
-
- IWL_DEBUG_INFO(priv, "INST uCode section being loaded...\n");
+ IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
ret = wait_event_interruptible_timeout(priv->wait_command_queue,
priv->ucode_write_complete, 5 * HZ);
if (ret == -ERESTARTSYS) {
- IWL_ERR(priv, "Could not load the INST uCode section due "
- "to interrupt\n");
+ IWL_ERR(priv, "Could not load the %s uCode section due "
+ "to interrupt\n", name);
return ret;
}
if (!ret) {
- IWL_ERR(priv, "Could not load the INST uCode section\n");
+ IWL_ERR(priv, "Could not load the %s uCode section\n",
+ name);
return -ETIMEDOUT;
}
- priv->ucode_write_complete = 0;
-
- ret = iwl5000_load_section(
- priv, data_image, IWL50_RTC_DATA_LOWER_BOUND);
- if (ret)
- return ret;
+ return 0;
+}
- IWL_DEBUG_INFO(priv, "DATA uCode section being loaded...\n");
+static int iwl5000_load_given_ucode(struct iwl_priv *priv,
+ struct fw_desc *inst_image,
+ struct fw_desc *data_image)
+{
+ int ret = 0;
- ret = wait_event_interruptible_timeout(priv->wait_command_queue,
- priv->ucode_write_complete, 5 * HZ);
- if (ret == -ERESTARTSYS) {
- IWL_ERR(priv, "Could not load the INST uCode section due "
- "to interrupt\n");
+ ret = iwl5000_load_section(priv, "INST", inst_image,
+ IWL50_RTC_INST_LOWER_BOUND);
+ if (ret)
return ret;
- } else if (!ret) {
- IWL_ERR(priv, "Could not load the DATA uCode section\n");
- return -ETIMEDOUT;
- } else
- ret = 0;
-
- priv->ucode_write_complete = 0;
- return ret;
+ return iwl5000_load_section(priv, "DATA", data_image,
+ IWL50_RTC_DATA_LOWER_BOUND);
}
int iwl5000_load_ucode(struct iwl_priv *priv)
@@ -781,7 +762,7 @@ void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
- if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
scd_bc_tbl[txq_id].
tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
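This hunk (and the matching change in the invalidate path just below) keys the duplicate-entry update of the scheduler byte-count table off the local write_ptr/read_ptr instead of txq->q.write_ptr. As I understand it, the table holds TFD_QUEUE_SIZE_MAX entries plus TFD_QUEUE_SIZE_BC_DUP mirror entries at the end, so slots below the duplicate threshold must be written twice. A standalone sketch of that mirroring (the sizes are assumptions for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative sizes; the real TFD_QUEUE_SIZE_MAX / _BC_DUP values are
     * not part of this hunk and are assumptions here */
    #define QUEUE_SIZE_MAX 256
    #define QUEUE_SIZE_DUP 64

    static uint16_t tfd_offset[QUEUE_SIZE_MAX + QUEUE_SIZE_DUP];

    /* mirror the first DUP slots at the end of the table so the hardware
     * can keep reading byte counts linearly across the ring wrap point */
    static void set_byte_cnt(unsigned int ptr, uint16_t bc_ent)
    {
            tfd_offset[ptr] = bc_ent;
            if (ptr < QUEUE_SIZE_DUP)
                    tfd_offset[QUEUE_SIZE_MAX + ptr] = bc_ent;
    }

    int main(void)
    {
            set_byte_cnt(3, 0x1234);
            printf("%#x %#x\n", (unsigned int)tfd_offset[3],
                   (unsigned int)tfd_offset[QUEUE_SIZE_MAX + 3]);
            return 0;
    }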
@@ -800,12 +781,12 @@ void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
if (txq_id != IWL_CMD_QUEUE_NUM)
sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
- bc_ent = cpu_to_le16(1 | (sta_id << 12));
+ bc_ent = cpu_to_le16(1 | (sta_id << 12));
scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
- if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+ tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}
static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
@@ -1467,6 +1448,7 @@ struct iwl_lib_ops iwl5000_lib = {
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
+ .dump_fh = iwl_dump_fh,
.load_ucode = iwl5000_load_ucode,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
@@ -1502,6 +1484,7 @@ struct iwl_lib_ops iwl5000_lib = {
.temperature = iwl5000_temperature,
.set_ct_kill = iwl5000_set_ct_threshold,
},
+ .add_bcast_station = iwl_add_bcast_station,
};
static struct iwl_lib_ops iwl5150_lib = {
@@ -1555,6 +1538,7 @@ static struct iwl_lib_ops iwl5150_lib = {
.temperature = iwl5150_temperature,
.set_ct_kill = iwl5150_set_ct_threshold,
},
+ .add_bcast_station = iwl_add_bcast_station,
};
static const struct iwl_ops iwl5000_ops = {
@@ -1600,7 +1584,9 @@ struct iwl_cfg iwl5300_agn_cfg = {
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
};
struct iwl_cfg iwl5100_bgn_cfg = {
@@ -1623,7 +1609,9 @@ struct iwl_cfg iwl5100_bgn_cfg = {
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
};
struct iwl_cfg iwl5100_abg_cfg = {
@@ -1646,6 +1634,7 @@ struct iwl_cfg iwl5100_abg_cfg = {
.use_bsm = false,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
};
struct iwl_cfg iwl5100_agn_cfg = {
@@ -1668,7 +1657,9 @@ struct iwl_cfg iwl5100_agn_cfg = {
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
};
struct iwl_cfg iwl5350_agn_cfg = {
@@ -1691,7 +1682,9 @@ struct iwl_cfg iwl5350_agn_cfg = {
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
};
struct iwl_cfg iwl5150_agn_cfg = {
@@ -1714,7 +1707,9 @@ struct iwl_cfg iwl5150_agn_cfg = {
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
};
struct iwl_cfg iwl5150_abg_cfg = {
@@ -1737,6 +1732,7 @@ struct iwl_cfg iwl5150_abg_cfg = {
.use_bsm = false,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
};
MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
index 90185777d98b..ddba39999997 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index a5a0ed4817a4..a9f8551e0e40 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008-2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -108,7 +108,7 @@ static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
.auto_corr_max_ofdm = 145,
.auto_corr_max_ofdm_mrc = 232,
- .auto_corr_max_ofdm_x1 = 145,
+ .auto_corr_max_ofdm_x1 = 110,
.auto_corr_max_ofdm_mrc_x1 = 232,
.auto_corr_min_cck = 125,
@@ -158,11 +158,25 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
/* Set initial sensitivity parameters */
/* Set initial calibration set */
priv->hw_params.sens = &iwl6000_sensitivity;
- priv->hw_params.calib_init_cfg =
+ switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
+ case CSR_HW_REV_TYPE_6x50:
+ priv->hw_params.calib_init_cfg =
BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_DC) |
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
+
+ break;
+ default:
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_BASE_BAND);
+ break;
+ }
+
return 0;
}
@@ -216,6 +230,7 @@ static struct iwl_lib_ops iwl6000_lib = {
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
+ .dump_fh = iwl_dump_fh,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
.send_tx_power = iwl5000_send_tx_power,
@@ -251,6 +266,7 @@ static struct iwl_lib_ops iwl6000_lib = {
.temperature = iwl5000_temperature,
.set_ct_kill = iwl6000_set_ct_threshold,
},
+ .add_bcast_station = iwl_add_bcast_station,
};
static const struct iwl_ops iwl6000_ops = {
@@ -307,6 +323,7 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
};
struct iwl_cfg iwl6000i_2abg_cfg = {
@@ -336,6 +353,7 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
};
struct iwl_cfg iwl6000i_2bg_cfg = {
@@ -365,6 +383,7 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
};
struct iwl_cfg iwl6050_2agn_cfg = {
@@ -395,6 +414,7 @@ struct iwl_cfg iwl6050_2agn_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
};
struct iwl_cfg iwl6050_2abg_cfg = {
@@ -424,6 +444,7 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
};
struct iwl_cfg iwl6000_3agn_cfg = {
@@ -454,6 +475,7 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
};
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.c b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
index 3bccba20f6da..1a24946bc203 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.h b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
index ab55f92a161d..a594e4fdc6b8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index b93e49158196..6aebcedaca8d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index affc0c5a2f2c..e71923961e69 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -191,7 +191,7 @@ enum {
IWL_RATE_2M_MASK)
#define IWL_CCK_RATES_MASK \
- (IWL_BASIC_RATES_MASK | \
+ (IWL_CCK_BASIC_RATES_MASK | \
IWL_RATE_5M_MASK | \
IWL_RATE_11M_MASK)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 771b03c1c7c5..d0268280d679 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -73,13 +73,7 @@
#define VD
#endif
-#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
-#define VS "s"
-#else
-#define VS
-#endif
-
-#define DRV_VERSION IWLWIFI_VERSION VD VS
+#define DRV_VERSION IWLWIFI_VERSION VD
MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -203,7 +197,8 @@ int iwl_commit_rxon(struct iwl_priv *priv)
priv->start_calib = 0;
/* Add the broadcast address so we can send broadcast frames */
- iwl_add_bcast_station(priv);
+ priv->cfg->ops->lib->add_bcast_station(priv);
+
/* If we have set the ASSOC_MSK and we are in BSS mode then
* add the IWL_AP_ID to the station rate table */
@@ -704,7 +699,7 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
-void iwl_continuous_event_trace(struct iwl_priv *priv)
+static void iwl_continuous_event_trace(struct iwl_priv *priv)
{
u32 capacity; /* event log capacity in # entries */
u32 base; /* SRAM byte address of event log header */
@@ -888,6 +883,8 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
+ priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
+ iwl_rx_spectrum_measure_notif;
priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
iwl_rx_pm_debug_statistics_notif;
@@ -901,7 +898,6 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_reply_statistics;
priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
- iwl_setup_spectrum_handlers(priv);
iwl_setup_rx_scan_handlers(priv);
/* status change handler */
@@ -1761,7 +1757,7 @@ static const char *desc_lookup_text[] = {
"DEBUG_1",
"DEBUG_2",
"DEBUG_3",
- "UNKNOWN"
+ "ADVANCED SYSASSERT"
};
static const char *desc_lookup(int i)
@@ -1965,7 +1961,7 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
IWL_ERR(priv,
"Invalid event log pointer 0x%08X for %s uCode\n",
base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
- return pos;
+ return -EINVAL;
}
/* event log header */
@@ -2013,7 +2009,7 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
bufsz = size * 48;
*buf = kmalloc(bufsz, GFP_KERNEL);
if (!*buf)
- return pos;
+ return -ENOMEM;
}
if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
/*
@@ -2443,18 +2439,6 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
return;
}
-static void iwl_bg_up(struct work_struct *data)
-{
- struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
- __iwl_up(priv);
- mutex_unlock(&priv->mutex);
-}
-
static void iwl_bg_restart(struct work_struct *data)
{
struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
@@ -2471,7 +2455,13 @@ static void iwl_bg_restart(struct work_struct *data)
ieee80211_restart_hw(priv->hw);
} else {
iwl_down(priv);
- queue_work(priv->workqueue, &priv->up);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ mutex_lock(&priv->mutex);
+ __iwl_up(priv);
+ mutex_unlock(&priv->mutex);
}
}
@@ -2607,7 +2597,7 @@ void iwl_post_associate(struct iwl_priv *priv)
* Not a mac80211 entry point function, but it fits in with all the
* other mac80211 functions grouped here.
*/
-static int iwl_setup_mac(struct iwl_priv *priv)
+static int iwl_mac_setup_register(struct iwl_priv *priv)
{
int ret;
struct ieee80211_hw *hw = priv->hw;
@@ -2839,14 +2829,18 @@ void iwl_config_ap(struct iwl_priv *priv)
}
static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_key_conf *keyconf, const u8 *addr,
- u32 iv32, u16 *phase1key)
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key)
{
struct iwl_priv *priv = hw->priv;
IWL_DEBUG_MAC80211(priv, "enter\n");
- iwl_update_tkip_key(priv, keyconf, addr, iv32, phase1key);
+ iwl_update_tkip_key(priv, keyconf,
+ sta ? sta->addr : iwl_bcast_addr,
+ iv32, phase1key);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
@@ -2955,6 +2949,9 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
return 0;
else
return ret;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /* do nothing */
+ return -EOPNOTSUPP;
default:
IWL_DEBUG_HT(priv, "unknown\n");
return -EINVAL;
@@ -3004,6 +3001,8 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
break;
case STA_NOTIFY_AWAKE:
WARN_ON(!sta_priv->client);
+ if (!sta_priv->asleep)
+ break;
sta_priv->asleep = false;
sta_id = iwl_find_station(priv, sta->addr);
if (sta_id != IWL_INVALID_STATION)
@@ -3280,7 +3279,6 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
init_waitqueue_head(&priv->wait_command_queue);
- INIT_WORK(&priv->up, iwl_bg_up);
INIT_WORK(&priv->restart, iwl_bg_restart);
INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
@@ -3365,6 +3363,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->iw_mode = NL80211_IFTYPE_STATION;
priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
+ priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
/* Choose which receivers/antennas to use */
if (priv->cfg->ops->hcmd->set_rxon_chain)
@@ -3616,9 +3615,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_setup_deferred_work(priv);
iwl_setup_rx_handlers(priv);
- /**********************************
- * 8. Setup and register mac80211
- **********************************/
+ /*********************************************
+ * 8. Enable interrupts and read RFKILL state
+ *********************************************/
/* enable interrupts if needed: hw bug w/a */
pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
@@ -3629,14 +3628,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_enable_interrupts(priv);
- err = iwl_setup_mac(priv);
- if (err)
- goto out_remove_sysfs;
-
- err = iwl_dbgfs_register(priv, DRV_NAME);
- if (err)
- IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
-
/* If platform's RF_KILL switch is NOT set to KILL */
if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
clear_bit(STATUS_RF_KILL_HW, &priv->status);
@@ -3648,6 +3639,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_power_initialize(priv);
iwl_tt_initialize(priv);
+
+ /**************************************************
+ * 9. Setup and register with mac80211 and debugfs
+ **************************************************/
+ err = iwl_mac_setup_register(priv);
+ if (err)
+ goto out_remove_sysfs;
+
+ err = iwl_dbgfs_register(priv, DRV_NAME);
+ if (err)
+ IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
+
return 0;
out_remove_sysfs:
@@ -3766,7 +3769,7 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
*****************************************************************************/
/* Hardware specific file defines the PCI IDs table for that hardware module */
-static struct pci_device_id iwl_hw_card_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
#ifdef CONFIG_IWL4965
{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
{IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index dc61906290e8..845831ac053e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.h b/drivers/net/wireless/iwlwifi/iwl-calib.h
index b6cef989a796..2b7b1df83ba0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 28f3800c560e..c2f31eb26bef 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -120,7 +120,6 @@ enum {
CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
/* 802.11h related */
- RADAR_NOTIFICATION = 0x70, /* not used */
REPLY_QUIET_CMD = 0x71, /* not used */
REPLY_CHANNEL_SWITCH = 0x72,
CHANNEL_SWITCH_NOTIFICATION = 0x73,
@@ -2248,10 +2247,22 @@ struct iwl_link_quality_cmd {
__le32 reserved2;
} __attribute__ ((packed));
+/*
+ * BT configuration enable flags:
+ * bit 0 - 1: BT channel announcement enabled
+ * 0: disable
+ * bit 1 - 1: priority of BT device enabled
+ * 0: disable
+ * bit 2 - 1: BT 2 wire support enabled
+ * 0: disable
+ */
+#define BT_COEX_DISABLE (0x0)
+#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0)
+#define BT_ENABLE_PRIORITY BIT(1)
+#define BT_ENABLE_2_WIRE BIT(2)
+
#define BT_COEX_DISABLE (0x0)
-#define BT_COEX_MODE_2W (0x1)
-#define BT_COEX_MODE_3W (0x2)
-#define BT_COEX_MODE_4W (0x3)
+#define BT_COEX_ENABLE (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)
#define BT_LEAD_TIME_MIN (0x0)
#define BT_LEAD_TIME_DEF (0x1E)
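The new defines replace the old BT_COEX_MODE_* values with individual feature bits; BT_COEX_ENABLE turns on channel announcement and priority but not 2-wire support. A standalone restatement of that composition (the flag values simply repeat the defines added above):

    #include <assert.h>

    #define BT_COEX_DISABLE            0x0
    #define BT_ENABLE_CHANNEL_ANNOUNCE (1u << 0)
    #define BT_ENABLE_PRIORITY         (1u << 1)
    #define BT_ENABLE_2_WIRE           (1u << 2)
    #define BT_COEX_ENABLE (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)

    int main(void)
    {
            /* enable combines channel announcement and priority ... */
            assert(BT_COEX_ENABLE == 0x3);
            /* ... but deliberately leaves 2-wire support off */
            assert(!(BT_COEX_ENABLE & BT_ENABLE_2_WIRE));
            return 0;
    }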
@@ -2984,7 +2995,7 @@ struct statistics_rx_ht_phy {
__le32 agg_crc32_good;
__le32 agg_mpdu_cnt;
__le32 agg_cnt;
- __le32 reserved2;
+ __le32 unsupport_mcs;
} __attribute__ ((packed));
#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
@@ -3087,8 +3098,8 @@ struct statistics_div {
} __attribute__ ((packed));
struct statistics_general {
- __le32 temperature;
- __le32 temperature_m;
+ __le32 temperature; /* radio temperature */
+ __le32 temperature_m; /* for 5000 and up, this is radio voltage */
struct statistics_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
@@ -3096,7 +3107,12 @@ struct statistics_general {
__le32 ttl_timestamp;
struct statistics_div div;
__le32 rx_enable_counter;
- __le32 reserved1;
+ /*
+ * num_of_sos_states:
+ * count the number of times we have to re-tune
+ * in order to get out of bad PHY status
+ */
+ __le32 num_of_sos_states;
__le32 reserved2;
__le32 reserved3;
} __attribute__ ((packed));
@@ -3161,13 +3177,30 @@ struct iwl_notif_statistics {
/*
* MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
+ *
+ * The uCode sends MISSED_BEACONS_NOTIFICATION to the driver whenever it
+ * detects missed beacons, regardless of how many were missed. The
+ * notification carries the full beacon accounting: the total number of
+ * missed beacons, the number of consecutive missed beacons, the number
+ * of beacons received and the number of beacons expected.
+ *
+ * If the uCode detects consecutive_missed_beacons > 5, it resets the radio
+ * to bring the radio/PHY back to a working state; this has no relation to
+ * when the driver performs sensitivity calibration.
+ *
+ * The driver should set its own missed_beacon_threshold to decide when to
+ * perform sensitivity calibration based on the number of consecutive missed
+ * beacons, in order to improve overall performance, especially in noisy
+ * environments.
+ *
*/
-/* if ucode missed CONSECUTIVE_MISSED_BCONS_TH beacons in a row,
- * then this notification will be sent. */
-#define CONSECUTIVE_MISSED_BCONS_TH 20
+
+#define IWL_MISSED_BEACON_THRESHOLD_MIN (1)
+#define IWL_MISSED_BEACON_THRESHOLD_DEF (5)
+#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF
struct iwl_missed_beacon_notif {
- __le32 consequtive_missed_beacons;
+ __le32 consecutive_missed_beacons;
__le32 total_missed_becons;
__le32 num_expected_beacons;
__le32 num_recvd_beacons;
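The renamed consecutive_missed_beacons field and the new IWL_MISSED_BEACON_THRESHOLD_* defines feed the policy described in the comment above: the driver compares the consecutive-miss count from the notification against its own threshold to decide when to run sensitivity calibration. A hypothetical, host-endian model of that comparison (the handler below is not the driver's actual code):

    #include <stdio.h>

    #define MISSED_BEACON_THRESHOLD_DEF 5 /* mirrors the define above */

    /* host-endian model of the __le32 notification struct above */
    struct missed_beacon_notif {
            unsigned int consecutive_missed_beacons;
            unsigned int total_missed_beacons;
            unsigned int num_expected_beacons;
            unsigned int num_recvd_beacons;
    };

    /* hypothetical policy: recalibrate once the consecutive-miss count
     * exceeds the driver's own threshold */
    static int should_recalibrate(const struct missed_beacon_notif *n,
                                  unsigned int threshold)
    {
            return n->consecutive_missed_beacons > threshold;
    }

    int main(void)
    {
            struct missed_beacon_notif n = { 6, 12, 20, 8 };

            if (should_recalibrate(&n, MISSED_BEACON_THRESHOLD_DEF))
                    printf("run sensitivity calibration\n");
            return 0;
    }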
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 14f482960d7f..02bf17ecaf54 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -47,6 +47,26 @@ MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
+/*
+ * When bt_coex_active is true, the uCode kills/defers Tx every time the
+ * priority line is asserted (BT signals on the priority line over PCIx).
+ * When bt_coex_active is false, the uCode ignores BT activity and
+ * operates normally.
+ *
+ * Users may see transmit problems on some platforms due to this WiFi/BT
+ * co-existence; the typical symptoms are:
+ *   able to scan and find all available APs
+ *   not able to associate with any AP
+ * On such platforms, WiFi communication can be restored by setting the
+ * "bt_coex_active" module parameter to "false"
+ *
+ * default: bt_coex_active = true (BT_COEX_ENABLE)
+ */
+static bool bt_coex_active = true;
+module_param(bt_coex_active, bool, S_IRUGO);
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist\n");
+
static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
0, COEX_UNASSOC_IDLE_FLAGS},
@@ -257,8 +277,8 @@ int iwl_hw_nic_init(struct iwl_priv *priv)
spin_lock_irqsave(&priv->lock, flags);
priv->cfg->ops->lib->apm_ops.init(priv);
- /* Set interrupt coalescing timer to 512 usecs */
- iwl_write8(priv, CSR_INT_COALESCING, 512 / 32);
+ /* Set interrupt coalescing calibration timer to default (512 usecs) */
+ iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1353,6 +1373,8 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
priv->cfg->ops->lib->dump_nic_error_log(priv);
if (priv->cfg->ops->lib->dump_csr)
priv->cfg->ops->lib->dump_csr(priv);
+ if (priv->cfg->ops->lib->dump_fh)
+ priv->cfg->ops->lib->dump_fh(priv, NULL, false);
priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
@@ -1803,6 +1825,16 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
if (val == 0xffffffff)
val = 0;
+ /*
+ * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
+ * (bit 15 before shifting it to 31) to clear when using interrupt
+ * coalescing. fortunately, bits 18 and 19 stay set when this happens
+ * so we use them to decide on the real state of the Rx bit.
+ * In other words, bit 15 is set if bit 18 or bit 19 is set.
+ */
+ if (val & 0xC0000)
+ val |= 0x8000;
+
inta = (0xff & val) | ((0xff00 & val) << 16);
IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
inta, inta_mask, val);
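A quick worked example of the workaround above, illustration only: bits 8-15 of the ICT value end up in bits 24-31 of inta after the byte expansion, so forcing bit 15 on whenever bit 18 or 19 is set restores the Rx interrupt bit at bit 31.
	/* ICT read 0x00040000: bit 18 set, Rx bit 15 wrongly cleared */
	u32 val = 0x00040000;

	if (val & 0xC0000)	/* bit 18 or bit 19 is set ...      */
		val |= 0x8000;	/* ... so force the Rx bit back on  */

	/* 0xff keeps bits 0-7, (0xff00 << 16) moves bits 8-15 to 24-31,
	 * so the forced bit 15 becomes bit 31 of inta (the FH Rx bit). */
	u32 inta = (0xff & val) | ((0xff00 & val) << 16);	/* 0x80000000 */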
@@ -1965,13 +1997,20 @@ EXPORT_SYMBOL(iwl_isr_legacy);
int iwl_send_bt_config(struct iwl_priv *priv)
{
struct iwl_bt_cmd bt_cmd = {
- .flags = BT_COEX_MODE_4W,
.lead_time = BT_LEAD_TIME_DEF,
.max_kill = BT_MAX_KILL_DEF,
.kill_ack_mask = 0,
.kill_cts_mask = 0,
};
+ if (!bt_coex_active)
+ bt_cmd.flags = BT_COEX_DISABLE;
+ else
+ bt_cmd.flags = BT_COEX_ENABLE;
+
+ IWL_DEBUG_INFO(priv, "BT coex %s\n",
+ (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
+
return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
sizeof(struct iwl_bt_cmd), &bt_cmd);
}
@@ -2334,6 +2373,21 @@ static void iwl_ht_conf(struct iwl_priv *priv,
IWL_DEBUG_MAC80211(priv, "leave\n");
}
+static inline void iwl_set_no_assoc(struct iwl_priv *priv)
+{
+ priv->assoc_id = 0;
+ iwl_led_disassociate(priv);
+ /*
+ * inform the ucode that there is no longer an
+ * association and that no more packets should be
+ * sent
+ */
+ priv->staging_rxon.filter_flags &=
+ ~RXON_FILTER_ASSOC_MSK;
+ priv->staging_rxon.assoc_id = 0;
+ iwlcore_commit_rxon(priv);
+}
+
#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
void iwl_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -2465,20 +2519,8 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
if (!iwl_is_rfkill(priv))
priv->cfg->ops->lib->post_associate(priv);
- } else {
- priv->assoc_id = 0;
- iwl_led_disassociate(priv);
-
- /*
- * inform the ucode that there is no longer an
- * association and that no more packets should be
- * send
- */
- priv->staging_rxon.filter_flags &=
- ~RXON_FILTER_ASSOC_MSK;
- priv->staging_rxon.assoc_id = 0;
- iwlcore_commit_rxon(priv);
- }
+ } else
+ iwl_set_no_assoc(priv);
}
if (changes && iwl_is_associated(priv) && priv->assoc_id) {
@@ -2493,12 +2535,14 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
}
}
- if ((changes & BSS_CHANGED_BEACON_ENABLED) &&
- vif->bss_conf.enable_beacon) {
- memcpy(priv->staging_rxon.bssid_addr,
- bss_conf->bssid, ETH_ALEN);
- memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- iwlcore_config_ap(priv);
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ if (vif->bss_conf.enable_beacon) {
+ memcpy(priv->staging_rxon.bssid_addr,
+ bss_conf->bssid, ETH_ALEN);
+ memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
+ iwlcore_config_ap(priv);
+ } else
+ iwl_set_no_assoc(priv);
}
mutex_unlock(&priv->mutex);
@@ -2587,23 +2631,21 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
- unsigned long flags;
+ int err = 0;
IWL_DEBUG_MAC80211(priv, "enter: type %d\n", vif->type);
+ mutex_lock(&priv->mutex);
+
if (priv->vif) {
IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto out;
}
- spin_lock_irqsave(&priv->lock, flags);
priv->vif = vif;
priv->iw_mode = vif->type;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- mutex_lock(&priv->mutex);
-
if (vif->addr) {
IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
@@ -2613,10 +2655,11 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
/* we are not ready, will run again when ready */
set_bit(STATUS_MODE_PENDING, &priv->status);
+ out:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
+ return err;
}
EXPORT_SYMBOL(iwl_mac_add_interface);
@@ -3263,6 +3306,93 @@ void iwl_dump_csr(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_dump_csr);
+static const char *get_fh_string(int cmd)
+{
+ switch (cmd) {
+ IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_WPTR);
+ IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
+ IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+ IWL_CMD(FH_TSSR_TX_STATUS_REG);
+ IWL_CMD(FH_TSSR_TX_ERROR_REG);
+ default:
+ return "UNKNOWN";
+
+ }
+}
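get_fh_string() relies on the driver's IWL_CMD() stringification helper; based on how it is used here, it presumably expands each entry to a case returning the register name. Shown below as an assumption for reference only; the real definition lives elsewhere in the driver:
#define IWL_CMD(x) case x: return #x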
+
+int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
+{
+ int i;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ int pos = 0;
+ size_t bufsz = 0;
+#endif
+ u32 fh_tbl[] = {
+ FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ FH_RSCSR_CHNL0_WPTR,
+ FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_MEM_RSSR_SHARED_CTRL_REG,
+ FH_MEM_RSSR_RX_STATUS_REG,
+ FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+ FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_ERROR_REG
+ };
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (display) {
+ bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(priv, fh_tbl[i]));
+ }
+ return pos;
+ }
+#endif
+ IWL_ERR(priv, "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ IWL_ERR(priv, " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(priv, fh_tbl[i]));
+ }
+ return 0;
+}
+EXPORT_SYMBOL(iwl_dump_fh);
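Besides the error-path call added earlier (dump_fh(priv, NULL, false)), the display mode returns a formatted buffer intended for debugfs. A hedged sketch of a debugfs read handler built on it, following the same pattern as the log_event file further down; the handler name is hypothetical:
static ssize_t example_dbgfs_fh_reg_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	char *buf = NULL;
	int pos;
	ssize_t ret;

	pos = iwl_dump_fh(priv, &buf, true);	/* allocates and fills buf */
	if (pos < 0)
		return pos;

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}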
+
+void iwl_force_rf_reset(struct iwl_priv *priv)
+{
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ if (!iwl_is_associated(priv)) {
+ IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
+ return;
+ }
+ /*
+ * There is no easy way to force a radio reset; the only known method
+ * is to switch channels, which forces the radio to be reset and
+ * re-tuned.  An internal short scan (single channel) is used to
+ * achieve this.
+ * The driver should reset the radio when the number of consecutive
+ * missed beacons is exceeded, or when any other uCode error condition
+ * is detected.
+ */
+ IWL_DEBUG_INFO(priv, "perform radio reset.\n");
+ iwl_internal_short_hw_scan(priv);
+ return;
+}
+EXPORT_SYMBOL(iwl_force_rf_reset);
+
#ifdef CONFIG_PM
int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 3ef86f6c7755..ec1fe1d7cc9a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,8 +63,6 @@
#ifndef __iwl_core_h__
#define __iwl_core_h__
-#include <generated/utsrelease.h>
-
/************************
* forward declarations *
************************/
@@ -72,8 +70,8 @@ struct iwl_host_cmd;
struct iwl_cmd;
-#define IWLWIFI_VERSION UTS_RELEASE "-k"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2009 Intel Corporation"
+#define IWLWIFI_VERSION "in-tree:"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
#define IWL_PCI_DEVICE(dev, subdev, cfg) \
@@ -173,6 +171,7 @@ struct iwl_lib_ops {
bool full_log, char **buf, bool display);
void (*dump_nic_error_log)(struct iwl_priv *priv);
void (*dump_csr)(struct iwl_priv *priv);
+ int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
int (*set_channel_switch)(struct iwl_priv *priv, u16 channel);
/* power management */
struct iwl_apm_ops apm_ops;
@@ -189,6 +188,8 @@ struct iwl_lib_ops {
/* temperature */
struct iwl_temp_ops temp_ops;
+ /* station management */
+ void (*add_bcast_station)(struct iwl_priv *priv);
};
struct iwl_led_ops {
@@ -233,6 +234,8 @@ struct iwl_mod_params {
* @adv_thermal_throttle: support advance thermal throttle
* @support_ct_kill_exit: support ct kill exit condition
* @support_wimax_coexist: support wimax/wifi co-exist
+ * @plcp_delta_threshold: plcp error rate threshold used to trigger
+ * radio re-tuning when the received plcp error rate is too high
*
* We enable the driver to be backward compatible wrt API version. The
* driver specifies which APIs it supports (with @ucode_api_max being the
@@ -289,6 +292,7 @@ struct iwl_cfg {
bool adv_thermal_throttle;
bool support_ct_kill_exit;
const bool support_wimax_coexist;
+ u8 plcp_delta_threshold;
};
/***************************
@@ -425,6 +429,8 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
/* Handlers */
void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
+void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
void iwl_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_reply_statistics(struct iwl_priv *priv,
@@ -495,6 +501,8 @@ void iwl_init_scan_params(struct iwl_priv *priv);
int iwl_scan_cancel(struct iwl_priv *priv);
int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req);
+int iwl_internal_short_hw_scan(struct iwl_priv *priv);
+void iwl_force_rf_reset(struct iwl_priv *priv);
u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
const u8 *ie, int ie_len, int left);
void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
@@ -525,14 +533,6 @@ int iwl_send_calib_results(struct iwl_priv *priv);
int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
void iwl_calib_free_results(struct iwl_priv *priv);
-/*******************************************************************************
- * Spectrum Measureemtns in iwl-spectrum.c
- ******************************************************************************/
-#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
-void iwl_setup_spectrum_handlers(struct iwl_priv *priv);
-#else
-static inline void iwl_setup_spectrum_handlers(struct iwl_priv *priv) {}
-#endif
/*****************************************************
* S e n d i n g H o s t C o m m a n d s *
*****************************************************/
@@ -584,6 +584,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv);
int iwl_dump_nic_event_log(struct iwl_priv *priv,
bool full_log, char **buf, bool display);
void iwl_dump_csr(struct iwl_priv *priv);
+int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display);
#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv);
#else
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 1ec8cb4d5eae..1e00720bf8b1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 58e0462cafa3..1c7b53d511c7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -67,59 +67,6 @@ do { \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-struct iwl_debugfs {
- const char *name;
- struct dentry *dir_drv;
- struct dentry *dir_data;
- struct dentry *dir_debug;
- struct dentry *dir_rf;
- struct dir_data_files {
- struct dentry *file_sram;
- struct dentry *file_nvm;
- struct dentry *file_stations;
- struct dentry *file_log_event;
- struct dentry *file_channels;
- struct dentry *file_status;
- struct dentry *file_interrupt;
- struct dentry *file_qos;
- struct dentry *file_thermal_throttling;
- struct dentry *file_led;
- struct dentry *file_disable_ht40;
- struct dentry *file_sleep_level_override;
- struct dentry *file_current_sleep_command;
- } dbgfs_data_files;
- struct dir_rf_files {
- struct dentry *file_disable_sensitivity;
- struct dentry *file_disable_chain_noise;
- struct dentry *file_disable_tx_power;
- } dbgfs_rf_files;
- struct dir_debug_files {
- struct dentry *file_rx_statistics;
- struct dentry *file_tx_statistics;
- struct dentry *file_traffic_log;
- struct dentry *file_rx_queue;
- struct dentry *file_tx_queue;
- struct dentry *file_ucode_rx_stats;
- struct dentry *file_ucode_tx_stats;
- struct dentry *file_ucode_general_stats;
- struct dentry *file_sensitivity;
- struct dentry *file_chain_noise;
- struct dentry *file_tx_power;
- struct dentry *file_power_save_status;
- struct dentry *file_clear_ucode_statistics;
- struct dentry *file_clear_traffic_statistics;
- struct dentry *file_csr;
- struct dentry *file_ucode_tracing;
- } dbgfs_debug_files;
- u32 sram_offset;
- u32 sram_len;
-};
-
-int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
-void iwl_dbgfs_unregister(struct iwl_priv *priv);
-#endif
-
#else
#define IWL_DEBUG(__priv, level, fmt, args...)
#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
@@ -128,9 +75,10 @@ static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
{}
#endif /* CONFIG_IWLWIFI_DEBUG */
-
-
-#ifndef CONFIG_IWLWIFI_DEBUGFS
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
+void iwl_dbgfs_unregister(struct iwl_priv *priv);
+#else
static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
{
return 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index ee5aed12a4b1..d134301b553c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -41,43 +41,28 @@
#include "iwl-calib.h"
/* create and remove of files */
-#define DEBUGFS_ADD_DIR(name, parent) do { \
- dbgfs->dir_##name = debugfs_create_dir(#name, parent); \
- if (!(dbgfs->dir_##name)) \
- goto err; \
+#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, priv, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
} while (0)
-#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
- dbgfs->dbgfs_##parent##_files.file_##name = \
- debugfs_create_file(#name, mode, \
- dbgfs->dir_##parent, priv, \
- &iwl_dbgfs_##name##_ops); \
- if (!(dbgfs->dbgfs_##parent##_files.file_##name)) \
- goto err; \
+#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
} while (0)
-#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
- dbgfs->dbgfs_##parent##_files.file_##name = \
- debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
- dbgfs->dir_##parent, ptr); \
- if (IS_ERR(dbgfs->dbgfs_##parent##_files.file_##name) \
- || !dbgfs->dbgfs_##parent##_files.file_##name) \
- goto err; \
+#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
} while (0)
-#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
- dbgfs->dbgfs_##parent##_files.file_##name = \
- debugfs_create_x32(#name, S_IRUSR, dbgfs->dir_##parent, ptr); \
- if (IS_ERR(dbgfs->dbgfs_##parent##_files.file_##name) \
- || !dbgfs->dbgfs_##parent##_files.file_##name) \
- goto err; \
-} while (0)
-
-#define DEBUGFS_REMOVE(name) do { \
- debugfs_remove(name); \
- name = NULL; \
-} while (0);
-
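With the per-file dentry bookkeeping gone, the new macros are meant to be used inside a registration function that bails out to a single error label and tears everything down with one recursive remove. A minimal sketch, assuming a driver-level directory dentry; the function and directory names are illustrative, and the real iwl_dbgfs_register() is not shown in this hunk:
static int example_dbgfs_register(struct iwl_priv *priv,
				  struct dentry *dir_drv)
{
	struct dentry *dir_debug;

	dir_debug = debugfs_create_dir("debug", dir_drv);
	if (!dir_debug)
		goto err;

	DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IWUSR | S_IRUSR);
	return 0;

err:
	/* no dentries to track: one recursive remove drops everything
	 * created so far */
	debugfs_remove_recursive(dir_drv);
	return -ENOMEM;
}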
/* file operation */
#define DEBUGFS_READ_FUNC(name) \
static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
@@ -125,7 +110,7 @@ static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char *buf;
int pos = 0;
@@ -184,7 +169,7 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char *buf;
int pos = 0;
int cnt;
@@ -232,28 +217,28 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
ssize_t ret;
int i;
int pos = 0;
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
size_t bufsz;
/* default is to dump the entire data segment */
- if (!priv->dbgfs->sram_offset && !priv->dbgfs->sram_len) {
- priv->dbgfs->sram_offset = 0x800000;
+ if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
+ priv->dbgfs_sram_offset = 0x800000;
if (priv->ucode_type == UCODE_INIT)
- priv->dbgfs->sram_len = priv->ucode_init_data.len;
+ priv->dbgfs_sram_len = priv->ucode_init_data.len;
else
- priv->dbgfs->sram_len = priv->ucode_data.len;
+ priv->dbgfs_sram_len = priv->ucode_data.len;
}
- bufsz = 30 + priv->dbgfs->sram_len * sizeof(char) * 10;
+ bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
buf = kmalloc(bufsz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
- priv->dbgfs->sram_len);
+ priv->dbgfs_sram_len);
pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
- priv->dbgfs->sram_offset);
- for (i = priv->dbgfs->sram_len; i > 0; i -= 4) {
- val = iwl_read_targ_mem(priv, priv->dbgfs->sram_offset + \
- priv->dbgfs->sram_len - i);
+ priv->dbgfs_sram_offset);
+ for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
+ val = iwl_read_targ_mem(priv, priv->dbgfs_sram_offset + \
+ priv->dbgfs_sram_len - i);
if (i < 4) {
switch (i) {
case 1:
@@ -293,11 +278,11 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
return -EFAULT;
if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
- priv->dbgfs->sram_offset = offset;
- priv->dbgfs->sram_len = len;
+ priv->dbgfs_sram_offset = offset;
+ priv->dbgfs_sram_len = len;
} else {
- priv->dbgfs->sram_offset = 0;
- priv->dbgfs->sram_len = 0;
+ priv->dbgfs_sram_offset = 0;
+ priv->dbgfs_sram_len = 0;
}
return count;
@@ -306,7 +291,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct iwl_station_entry *station;
int max_sta = priv->hw_params.max_stations;
char *buf;
@@ -376,7 +361,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
loff_t *ppos)
{
ssize_t ret;
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0, ofs = 0, buf_size = 0;
const u8 *ptr;
char *buf;
@@ -429,8 +414,9 @@ static ssize_t iwl_dbgfs_log_event_read(struct file *file,
int pos = 0;
ssize_t ret = -ENOMEM;
- pos = priv->cfg->ops->lib->dump_nic_event_log(priv, true, &buf, true);
- if (pos && buf) {
+ ret = pos = priv->cfg->ops->lib->dump_nic_event_log(
+ priv, true, &buf, true);
+ if (buf) {
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
}
@@ -464,7 +450,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct ieee80211_channel *channels = NULL;
const struct ieee80211_supported_band *supp_band = NULL;
int pos = 0, i, bufsz = PAGE_SIZE;
@@ -537,7 +523,7 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[512];
int pos = 0;
const size_t bufsz = sizeof(buf);
@@ -585,7 +571,7 @@ static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
int cnt = 0;
char *buf;
@@ -672,7 +658,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0, i;
char buf[256];
const size_t bufsz = sizeof(buf);
@@ -695,7 +681,7 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
char buf[256];
const size_t bufsz = sizeof(buf);
@@ -721,7 +707,7 @@ static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
struct iwl_tt_restriction *restriction;
char buf[100];
@@ -781,7 +767,7 @@ static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[100];
int pos = 0;
const size_t bufsz = sizeof(buf);
@@ -829,7 +815,9 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
priv->power_data.debug_sleep_level_override = value;
+ mutex_lock(&priv->mutex);
iwl_power_update_mode(priv, true);
+ mutex_unlock(&priv->mutex);
return count;
}
@@ -838,7 +826,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[10];
int pos, value;
const size_t bufsz = sizeof(buf);
@@ -856,7 +844,7 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[200];
int pos = 0, i;
const size_t bufsz = sizeof(buf);
@@ -994,7 +982,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct iwl_tx_queue *txq;
struct iwl_queue *q;
char *buf;
@@ -1040,7 +1028,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct iwl_rx_queue *rxq = &priv->rxq;
char buf[256];
int pos = 0;
@@ -1081,36 +1069,33 @@ static int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf,
return p;
}
+static const char ucode_stats_header[] =
+ "%-32s current acumulative delta max\n";
+static const char ucode_stats_short_format[] =
+ " %-30s %10u\n";
+static const char ucode_stats_format[] =
+ " %-30s %10u %10u %10u %10u\n";
static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
- int bufsz = sizeof(struct statistics_rx_phy) * 20 +
- sizeof(struct statistics_rx_non_phy) * 20 +
- sizeof(struct statistics_rx_ht_phy) * 20 + 400;
+ int bufsz = sizeof(struct statistics_rx_phy) * 40 +
+ sizeof(struct statistics_rx_non_phy) * 40 +
+ sizeof(struct statistics_rx_ht_phy) * 40 + 400;
ssize_t ret;
- struct statistics_rx_phy *ofdm, *accum_ofdm;
- struct statistics_rx_phy *cck, *accum_cck;
+ struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
struct statistics_rx_non_phy *general, *accum_general;
- struct statistics_rx_ht_phy *ht, *accum_ht;
+ struct statistics_rx_non_phy *delta_general, *max_general;
+ struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
if (!iwl_is_alive(priv))
return -EAGAIN;
- /* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
- ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (ret) {
- IWL_ERR(priv,
- "Error sending statistics request: %zd\n", ret);
- return -EAGAIN;
- }
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate Buffer\n");
@@ -1129,264 +1114,401 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
accum_cck = &priv->accum_statistics.rx.cck;
accum_general = &priv->accum_statistics.rx.general;
accum_ht = &priv->accum_statistics.rx.ofdm_ht;
+ delta_ofdm = &priv->delta_statistics.rx.ofdm;
+ delta_cck = &priv->delta_statistics.rx.cck;
+ delta_general = &priv->delta_statistics.rx.general;
+ delta_ht = &priv->delta_statistics.rx.ofdm_ht;
+ max_ofdm = &priv->max_delta.rx.ofdm;
+ max_cck = &priv->max_delta.rx.cck;
+ max_general = &priv->max_delta.rx.general;
+ max_ht = &priv->max_delta.rx.ofdm_ht;
+
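The delta and max columns used below assume that priv->delta_statistics and priv->max_delta are refreshed wherever the uCode statistics notification is stored, which is outside this diff. A sketch of that per-counter bookkeeping; the helper name is hypothetical:
/* Sketch only: run for each counter when a new notification arrives. */
static void example_accum_counter(__le32 cur_le, __le32 prev_le,
				  u32 *accum, u32 *delta, u32 *max_delta)
{
	u32 cur = le32_to_cpu(cur_le);
	u32 prev = le32_to_cpu(prev_le);

	if (cur > prev) {		/* counter neither wrapped nor reset */
		*delta = cur - prev;
		*accum += *delta;
		if (*delta > *max_delta)
			*max_delta = *delta;
	}
}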
pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "overrun_err:\t\t%u\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_Rx - OFDM:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
+ accum_ofdm->ina_cnt,
+ delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_cnt:",
+ le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+ delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "plcp_err:",
+ le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+ delta_ofdm->plcp_err, max_ofdm->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_err:",
+ le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+ delta_ofdm->crc32_err, max_ofdm->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "overrun_err:",
le32_to_cpu(ofdm->overrun_err),
- accum_ofdm->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "early_overrun_err:\t%u\t\t\t%u\n",
+ accum_ofdm->overrun_err,
+ delta_ofdm->overrun_err, max_ofdm->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "early_overrun_err:",
le32_to_cpu(ofdm->early_overrun_err),
- accum_ofdm->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
+ accum_ofdm->early_overrun_err,
+ delta_ofdm->early_overrun_err,
+ max_ofdm->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_good:",
le32_to_cpu(ofdm->crc32_good),
- accum_ofdm->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- "false_alarm_cnt:\t%u\t\t\t%u\n",
+ accum_ofdm->crc32_good,
+ delta_ofdm->crc32_good, max_ofdm->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "false_alarm_cnt:",
le32_to_cpu(ofdm->false_alarm_cnt),
- accum_ofdm->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "fina_sync_err_cnt:\t%u\t\t\t%u\n",
+ accum_ofdm->false_alarm_cnt,
+ delta_ofdm->false_alarm_cnt,
+ max_ofdm->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_sync_err_cnt:",
le32_to_cpu(ofdm->fina_sync_err_cnt),
- accum_ofdm->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sfd_timeout:\t\t%u\t\t\t%u\n",
+ accum_ofdm->fina_sync_err_cnt,
+ delta_ofdm->fina_sync_err_cnt,
+ max_ofdm->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sfd_timeout:",
le32_to_cpu(ofdm->sfd_timeout),
- accum_ofdm->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "fina_timeout:\t\t%u\t\t\t%u\n",
+ accum_ofdm->sfd_timeout,
+ delta_ofdm->sfd_timeout,
+ max_ofdm->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_timeout:",
le32_to_cpu(ofdm->fina_timeout),
- accum_ofdm->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "unresponded_rts:\t%u\t\t\t%u\n",
+ accum_ofdm->fina_timeout,
+ delta_ofdm->fina_timeout,
+ max_ofdm->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "unresponded_rts:",
le32_to_cpu(ofdm->unresponded_rts),
- accum_ofdm->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- "rxe_frame_lmt_ovrun:\t%u\t\t\t%u\n",
+ accum_ofdm->unresponded_rts,
+ delta_ofdm->unresponded_rts,
+ max_ofdm->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "rxe_frame_lmt_ovrun:",
le32_to_cpu(ofdm->rxe_frame_limit_overrun),
- accum_ofdm->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_ack_cnt:\t\t%u\t\t\t%u\n",
+ accum_ofdm->rxe_frame_limit_overrun,
+ delta_ofdm->rxe_frame_limit_overrun,
+ max_ofdm->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_ack_cnt:",
le32_to_cpu(ofdm->sent_ack_cnt),
- accum_ofdm->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_cts_cnt:\t\t%u\t\t\t%u\n",
+ accum_ofdm->sent_ack_cnt,
+ delta_ofdm->sent_ack_cnt,
+ max_ofdm->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_cts_cnt:",
le32_to_cpu(ofdm->sent_cts_cnt),
- accum_ofdm->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_ba_rsp_cnt:\t%u\t\t\t%u\n",
+ accum_ofdm->sent_cts_cnt,
+ delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_ba_rsp_cnt:",
le32_to_cpu(ofdm->sent_ba_rsp_cnt),
- accum_ofdm->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "dsp_self_kill:\t\t%u\t\t\t%u\n",
+ accum_ofdm->sent_ba_rsp_cnt,
+ delta_ofdm->sent_ba_rsp_cnt,
+ max_ofdm->sent_ba_rsp_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "dsp_self_kill:",
le32_to_cpu(ofdm->dsp_self_kill),
- accum_ofdm->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- "mh_format_err:\t\t%u\t\t\t%u\n",
+ accum_ofdm->dsp_self_kill,
+ delta_ofdm->dsp_self_kill,
+ max_ofdm->dsp_self_kill);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "mh_format_err:",
le32_to_cpu(ofdm->mh_format_err),
- accum_ofdm->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "re_acq_main_rssi_sum:\t%u\t\t\t%u\n",
+ accum_ofdm->mh_format_err,
+ delta_ofdm->mh_format_err,
+ max_ofdm->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "re_acq_main_rssi_sum:",
le32_to_cpu(ofdm->re_acq_main_rssi_sum),
- accum_ofdm->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - CCK:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt:\t\t%u\t\t\t%u\n",
- le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt:\t\t%u\t\t\t%u\n",
- le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(cck->plcp_err), accum_cck->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(cck->crc32_err), accum_cck->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "overrun_err:\t\t%u\t\t\t%u\n",
+ accum_ofdm->re_acq_main_rssi_sum,
+ delta_ofdm->re_acq_main_rssi_sum,
+ max_ofdm->re_acq_main_rssi_sum);
+
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_Rx - CCK:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "ina_cnt:",
+ le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+ delta_cck->ina_cnt, max_cck->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_cnt:",
+ le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+ delta_cck->fina_cnt, max_cck->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "plcp_err:",
+ le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+ delta_cck->plcp_err, max_cck->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_err:",
+ le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+ delta_cck->crc32_err, max_cck->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "overrun_err:",
le32_to_cpu(cck->overrun_err),
- accum_cck->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "early_overrun_err:\t%u\t\t\t%u\n",
+ accum_cck->overrun_err,
+ delta_cck->overrun_err, max_cck->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "early_overrun_err:",
le32_to_cpu(cck->early_overrun_err),
- accum_cck->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
- le32_to_cpu(cck->crc32_good), accum_cck->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- "false_alarm_cnt:\t%u\t\t\t%u\n",
+ accum_cck->early_overrun_err,
+ delta_cck->early_overrun_err,
+ max_cck->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_good:",
+ le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+ delta_cck->crc32_good,
+ max_cck->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "false_alarm_cnt:",
le32_to_cpu(cck->false_alarm_cnt),
- accum_cck->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "fina_sync_err_cnt:\t%u\t\t\t%u\n",
+ accum_cck->false_alarm_cnt,
+ delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_sync_err_cnt:",
le32_to_cpu(cck->fina_sync_err_cnt),
- accum_cck->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sfd_timeout:\t\t%u\t\t\t%u\n",
+ accum_cck->fina_sync_err_cnt,
+ delta_cck->fina_sync_err_cnt,
+ max_cck->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sfd_timeout:",
le32_to_cpu(cck->sfd_timeout),
- accum_cck->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "fina_timeout:\t\t%u\t\t\t%u\n",
+ accum_cck->sfd_timeout,
+ delta_cck->sfd_timeout, max_cck->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_timeout:",
le32_to_cpu(cck->fina_timeout),
- accum_cck->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "unresponded_rts:\t%u\t\t\t%u\n",
+ accum_cck->fina_timeout,
+ delta_cck->fina_timeout, max_cck->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "unresponded_rts:",
le32_to_cpu(cck->unresponded_rts),
- accum_cck->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- "rxe_frame_lmt_ovrun:\t%u\t\t\t%u\n",
+ accum_cck->unresponded_rts,
+ delta_cck->unresponded_rts,
+ max_cck->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "rxe_frame_lmt_ovrun:",
le32_to_cpu(cck->rxe_frame_limit_overrun),
- accum_cck->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_ack_cnt:\t\t%u\t\t\t%u\n",
+ accum_cck->rxe_frame_limit_overrun,
+ delta_cck->rxe_frame_limit_overrun,
+ max_cck->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_ack_cnt:",
le32_to_cpu(cck->sent_ack_cnt),
- accum_cck->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_cts_cnt:\t\t%u\t\t\t%u\n",
+ accum_cck->sent_ack_cnt,
+ delta_cck->sent_ack_cnt,
+ max_cck->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_cts_cnt:",
le32_to_cpu(cck->sent_cts_cnt),
- accum_cck->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_ba_rsp_cnt:\t%u\t\t\t%u\n",
+ accum_cck->sent_cts_cnt,
+ delta_cck->sent_cts_cnt,
+ max_cck->sent_cts_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_ba_rsp_cnt:",
le32_to_cpu(cck->sent_ba_rsp_cnt),
- accum_cck->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "dsp_self_kill:\t\t%u\t\t\t%u\n",
+ accum_cck->sent_ba_rsp_cnt,
+ delta_cck->sent_ba_rsp_cnt,
+ max_cck->sent_ba_rsp_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "dsp_self_kill:",
le32_to_cpu(cck->dsp_self_kill),
- accum_cck->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- "mh_format_err:\t\t%u\t\t\t%u\n",
+ accum_cck->dsp_self_kill,
+ delta_cck->dsp_self_kill,
+ max_cck->dsp_self_kill);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "mh_format_err:",
le32_to_cpu(cck->mh_format_err),
- accum_cck->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "re_acq_main_rssi_sum:\t%u\t\t\t%u\n",
+ accum_cck->mh_format_err,
+ delta_cck->mh_format_err, max_cck->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "re_acq_main_rssi_sum:",
le32_to_cpu(cck->re_acq_main_rssi_sum),
- accum_cck->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - GENERAL:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "bogus_cts:\t\t%u\t\t\t%u\n",
+ accum_cck->re_acq_main_rssi_sum,
+ delta_cck->re_acq_main_rssi_sum,
+ max_cck->re_acq_main_rssi_sum);
+
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_Rx - GENERAL:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "bogus_cts:",
le32_to_cpu(general->bogus_cts),
- accum_general->bogus_cts);
- pos += scnprintf(buf + pos, bufsz - pos, "bogus_ack:\t\t%u\t\t\t%u\n",
+ accum_general->bogus_cts,
+ delta_general->bogus_cts, max_general->bogus_cts);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "bogus_ack:",
le32_to_cpu(general->bogus_ack),
- accum_general->bogus_ack);
- pos += scnprintf(buf + pos, bufsz - pos,
- "non_bssid_frames:\t%u\t\t\t%u\n",
+ accum_general->bogus_ack,
+ delta_general->bogus_ack, max_general->bogus_ack);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "non_bssid_frames:",
le32_to_cpu(general->non_bssid_frames),
- accum_general->non_bssid_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- "filtered_frames:\t%u\t\t\t%u\n",
+ accum_general->non_bssid_frames,
+ delta_general->non_bssid_frames,
+ max_general->non_bssid_frames);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "filtered_frames:",
le32_to_cpu(general->filtered_frames),
- accum_general->filtered_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- "non_channel_beacons:\t%u\t\t\t%u\n",
+ accum_general->filtered_frames,
+ delta_general->filtered_frames,
+ max_general->filtered_frames);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "non_channel_beacons:",
le32_to_cpu(general->non_channel_beacons),
- accum_general->non_channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos,
- "channel_beacons:\t%u\t\t\t%u\n",
+ accum_general->non_channel_beacons,
+ delta_general->non_channel_beacons,
+ max_general->non_channel_beacons);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "channel_beacons:",
le32_to_cpu(general->channel_beacons),
- accum_general->channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos,
- "num_missed_bcon:\t%u\t\t\t%u\n",
+ accum_general->channel_beacons,
+ delta_general->channel_beacons,
+ max_general->channel_beacons);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "num_missed_bcon:",
le32_to_cpu(general->num_missed_bcon),
- accum_general->num_missed_bcon);
- pos += scnprintf(buf + pos, bufsz - pos,
- "adc_rx_saturation_time:\t%u\t\t\t%u\n",
+ accum_general->num_missed_bcon,
+ delta_general->num_missed_bcon,
+ max_general->num_missed_bcon);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "adc_rx_saturation_time:",
le32_to_cpu(general->adc_rx_saturation_time),
- accum_general->adc_rx_saturation_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- "ina_detect_search_tm:\t%u\t\t\t%u\n",
+ accum_general->adc_rx_saturation_time,
+ delta_general->adc_rx_saturation_time,
+ max_general->adc_rx_saturation_time);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "ina_detect_search_tm:",
le32_to_cpu(general->ina_detection_search_time),
- accum_general->ina_detection_search_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_silence_rssi_a:\t%u\t\t\t%u\n",
+ accum_general->ina_detection_search_time,
+ delta_general->ina_detection_search_time,
+ max_general->ina_detection_search_time);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_silence_rssi_a:",
le32_to_cpu(general->beacon_silence_rssi_a),
- accum_general->beacon_silence_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_silence_rssi_b:\t%u\t\t\t%u\n",
+ accum_general->beacon_silence_rssi_a,
+ delta_general->beacon_silence_rssi_a,
+ max_general->beacon_silence_rssi_a);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_silence_rssi_b:",
le32_to_cpu(general->beacon_silence_rssi_b),
- accum_general->beacon_silence_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_silence_rssi_c:\t%u\t\t\t%u\n",
+ accum_general->beacon_silence_rssi_b,
+ delta_general->beacon_silence_rssi_b,
+ max_general->beacon_silence_rssi_b);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_silence_rssi_c:",
le32_to_cpu(general->beacon_silence_rssi_c),
- accum_general->beacon_silence_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos,
- "interference_data_flag:\t%u\t\t\t%u\n",
+ accum_general->beacon_silence_rssi_c,
+ delta_general->beacon_silence_rssi_c,
+ max_general->beacon_silence_rssi_c);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "interference_data_flag:",
le32_to_cpu(general->interference_data_flag),
- accum_general->interference_data_flag);
- pos += scnprintf(buf + pos, bufsz - pos,
- "channel_load:\t\t%u\t\t\t%u\n",
+ accum_general->interference_data_flag,
+ delta_general->interference_data_flag,
+ max_general->interference_data_flag);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "channel_load:",
le32_to_cpu(general->channel_load),
- accum_general->channel_load);
- pos += scnprintf(buf + pos, bufsz - pos,
- "dsp_false_alarms:\t%u\t\t\t%u\n",
+ accum_general->channel_load,
+ delta_general->channel_load,
+ max_general->channel_load);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "dsp_false_alarms:",
le32_to_cpu(general->dsp_false_alarms),
- accum_general->dsp_false_alarms);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_rssi_a:\t\t%u\t\t\t%u\n",
+ accum_general->dsp_false_alarms,
+ delta_general->dsp_false_alarms,
+ max_general->dsp_false_alarms);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_rssi_a:",
le32_to_cpu(general->beacon_rssi_a),
- accum_general->beacon_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_rssi_b:\t\t%u\t\t\t%u\n",
+ accum_general->beacon_rssi_a,
+ delta_general->beacon_rssi_a,
+ max_general->beacon_rssi_a);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_rssi_b:",
le32_to_cpu(general->beacon_rssi_b),
- accum_general->beacon_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_rssi_c:\t\t%u\t\t\t%u\n",
+ accum_general->beacon_rssi_b,
+ delta_general->beacon_rssi_b,
+ max_general->beacon_rssi_b);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_rssi_c:",
le32_to_cpu(general->beacon_rssi_c),
- accum_general->beacon_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_energy_a:\t%u\t\t\t%u\n",
+ accum_general->beacon_rssi_c,
+ delta_general->beacon_rssi_c,
+ max_general->beacon_rssi_c);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_energy_a:",
le32_to_cpu(general->beacon_energy_a),
- accum_general->beacon_energy_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_energy_b:\t%u\t\t\t%u\n",
+ accum_general->beacon_energy_a,
+ delta_general->beacon_energy_a,
+ max_general->beacon_energy_a);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_energy_b:",
le32_to_cpu(general->beacon_energy_b),
- accum_general->beacon_energy_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_energy_c:\t%u\t\t\t%u\n",
+ accum_general->beacon_energy_b,
+ delta_general->beacon_energy_b,
+ max_general->beacon_energy_b);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_energy_c:",
le32_to_cpu(general->beacon_energy_c),
- accum_general->beacon_energy_c);
+ accum_general->beacon_energy_c,
+ delta_general->beacon_energy_c,
+ max_general->beacon_energy_c);
pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ht->plcp_err), accum_ht->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "overrun_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ht->overrun_err), accum_ht->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "early_overrun_err:\t%u\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_Rx - OFDM_HT:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "plcp_err:",
+ le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
+ delta_ht->plcp_err, max_ht->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "overrun_err:",
+ le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
+ delta_ht->overrun_err, max_ht->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "early_overrun_err:",
le32_to_cpu(ht->early_overrun_err),
- accum_ht->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ht->crc32_good), accum_ht->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ht->crc32_err), accum_ht->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "mh_format_err:\t\t%u\t\t\t%u\n",
+ accum_ht->early_overrun_err,
+ delta_ht->early_overrun_err,
+ max_ht->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_good:",
+ le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
+ delta_ht->crc32_good, max_ht->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_err:",
+ le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
+ delta_ht->crc32_err, max_ht->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "mh_format_err:",
le32_to_cpu(ht->mh_format_err),
- accum_ht->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg_crc32_good:\t\t%u\t\t\t%u\n",
+ accum_ht->mh_format_err,
+ delta_ht->mh_format_err, max_ht->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg_crc32_good:",
le32_to_cpu(ht->agg_crc32_good),
- accum_ht->agg_crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg_mpdu_cnt:\t\t%u\t\t\t%u\n",
+ accum_ht->agg_crc32_good,
+ delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg_mpdu_cnt:",
le32_to_cpu(ht->agg_mpdu_cnt),
- accum_ht->agg_mpdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "agg_cnt:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt);
+ accum_ht->agg_mpdu_cnt,
+ delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg_cnt:",
+ le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
+ delta_ht->agg_cnt, max_ht->agg_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "unsupport_mcs:",
+ le32_to_cpu(ht->unsupport_mcs),
+ accum_ht->unsupport_mcs,
+ delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
@@ -1397,26 +1519,16 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
- int bufsz = (sizeof(struct statistics_tx) * 24) + 250;
+ int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
ssize_t ret;
- struct statistics_tx *tx, *accum_tx;
+ struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
if (!iwl_is_alive(priv))
return -EAGAIN;
- /* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
- ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (ret) {
- IWL_ERR(priv,
- "Error sending statistics request: %zd\n", ret);
- return -EAGAIN;
- }
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate Buffer\n");
@@ -1429,106 +1541,148 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
*/
tx = &priv->statistics.tx;
accum_tx = &priv->accum_statistics.tx;
+ delta_tx = &priv->delta_statistics.tx;
+ max_tx = &priv->max_delta.tx;
pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Tx:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "preamble:\t\t\t%u\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_Tx:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "preamble:",
le32_to_cpu(tx->preamble_cnt),
- accum_tx->preamble_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "rx_detected_cnt:\t\t%u\t\t\t%u\n",
+ accum_tx->preamble_cnt,
+ delta_tx->preamble_cnt, max_tx->preamble_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "rx_detected_cnt:",
le32_to_cpu(tx->rx_detected_cnt),
- accum_tx->rx_detected_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "bt_prio_defer_cnt:\t\t%u\t\t\t%u\n",
+ accum_tx->rx_detected_cnt,
+ delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "bt_prio_defer_cnt:",
le32_to_cpu(tx->bt_prio_defer_cnt),
- accum_tx->bt_prio_defer_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "bt_prio_kill_cnt:\t\t%u\t\t\t%u\n",
+ accum_tx->bt_prio_defer_cnt,
+ delta_tx->bt_prio_defer_cnt,
+ max_tx->bt_prio_defer_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "bt_prio_kill_cnt:",
le32_to_cpu(tx->bt_prio_kill_cnt),
- accum_tx->bt_prio_kill_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "few_bytes_cnt:\t\t\t%u\t\t\t%u\n",
+ accum_tx->bt_prio_kill_cnt,
+ delta_tx->bt_prio_kill_cnt,
+ max_tx->bt_prio_kill_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "few_bytes_cnt:",
le32_to_cpu(tx->few_bytes_cnt),
- accum_tx->few_bytes_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "cts_timeout:\t\t\t%u\t\t\t%u\n",
- le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "ack_timeout:\t\t\t%u\t\t\t%u\n",
+ accum_tx->few_bytes_cnt,
+ delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "cts_timeout:",
+ le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+ delta_tx->cts_timeout, max_tx->cts_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "ack_timeout:",
le32_to_cpu(tx->ack_timeout),
- accum_tx->ack_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "expected_ack_cnt:\t\t%u\t\t\t%u\n",
+ accum_tx->ack_timeout,
+ delta_tx->ack_timeout, max_tx->ack_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "expected_ack_cnt:",
le32_to_cpu(tx->expected_ack_cnt),
- accum_tx->expected_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "actual_ack_cnt:\t\t\t%u\t\t\t%u\n",
+ accum_tx->expected_ack_cnt,
+ delta_tx->expected_ack_cnt,
+ max_tx->expected_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "actual_ack_cnt:",
le32_to_cpu(tx->actual_ack_cnt),
- accum_tx->actual_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "dump_msdu_cnt:\t\t\t%u\t\t\t%u\n",
+ accum_tx->actual_ack_cnt,
+ delta_tx->actual_ack_cnt,
+ max_tx->actual_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "dump_msdu_cnt:",
le32_to_cpu(tx->dump_msdu_cnt),
- accum_tx->dump_msdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "abort_nxt_frame_mismatch:"
- "\t%u\t\t\t%u\n",
+ accum_tx->dump_msdu_cnt,
+ delta_tx->dump_msdu_cnt,
+ max_tx->dump_msdu_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "abort_nxt_frame_mismatch:",
le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
- accum_tx->burst_abort_next_frame_mismatch_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "abort_missing_nxt_frame:"
- "\t%u\t\t\t%u\n",
+ accum_tx->burst_abort_next_frame_mismatch_cnt,
+ delta_tx->burst_abort_next_frame_mismatch_cnt,
+ max_tx->burst_abort_next_frame_mismatch_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "abort_missing_nxt_frame:",
le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
- accum_tx->burst_abort_missing_next_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "cts_timeout_collision:\t\t%u\t\t\t%u\n",
+ accum_tx->burst_abort_missing_next_frame_cnt,
+ delta_tx->burst_abort_missing_next_frame_cnt,
+ max_tx->burst_abort_missing_next_frame_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "cts_timeout_collision:",
le32_to_cpu(tx->cts_timeout_collision),
- accum_tx->cts_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos,
- "ack_ba_timeout_collision:\t%u\t\t\t%u\n",
+ accum_tx->cts_timeout_collision,
+ delta_tx->cts_timeout_collision,
+ max_tx->cts_timeout_collision);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "ack_ba_timeout_collision:",
le32_to_cpu(tx->ack_or_ba_timeout_collision),
- accum_tx->ack_or_ba_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg ba_timeout:\t\t\t%u\t\t\t%u\n",
+ accum_tx->ack_or_ba_timeout_collision,
+ delta_tx->ack_or_ba_timeout_collision,
+ max_tx->ack_or_ba_timeout_collision);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg ba_timeout:",
le32_to_cpu(tx->agg.ba_timeout),
- accum_tx->agg.ba_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg ba_resched_frames:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.ba_timeout,
+ delta_tx->agg.ba_timeout,
+ max_tx->agg.ba_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg ba_resched_frames:",
le32_to_cpu(tx->agg.ba_reschedule_frames),
- accum_tx->agg.ba_reschedule_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg scd_query_agg_frame:\t%u\t\t\t%u\n",
+ accum_tx->agg.ba_reschedule_frames,
+ delta_tx->agg.ba_reschedule_frames,
+ max_tx->agg.ba_reschedule_frames);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg scd_query_agg_frame:",
le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
- accum_tx->agg.scd_query_agg_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg scd_query_no_agg:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.scd_query_agg_frame_cnt,
+ delta_tx->agg.scd_query_agg_frame_cnt,
+ max_tx->agg.scd_query_agg_frame_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg scd_query_no_agg:",
le32_to_cpu(tx->agg.scd_query_no_agg),
- accum_tx->agg.scd_query_no_agg);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg scd_query_agg:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.scd_query_no_agg,
+ delta_tx->agg.scd_query_no_agg,
+ max_tx->agg.scd_query_no_agg);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg scd_query_agg:",
le32_to_cpu(tx->agg.scd_query_agg),
- accum_tx->agg.scd_query_agg);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg scd_query_mismatch:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.scd_query_agg,
+ delta_tx->agg.scd_query_agg,
+ max_tx->agg.scd_query_agg);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg scd_query_mismatch:",
le32_to_cpu(tx->agg.scd_query_mismatch),
- accum_tx->agg.scd_query_mismatch);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg frame_not_ready:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.scd_query_mismatch,
+ delta_tx->agg.scd_query_mismatch,
+ max_tx->agg.scd_query_mismatch);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg frame_not_ready:",
le32_to_cpu(tx->agg.frame_not_ready),
- accum_tx->agg.frame_not_ready);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg underrun:\t\t\t%u\t\t\t%u\n",
+ accum_tx->agg.frame_not_ready,
+ delta_tx->agg.frame_not_ready,
+ max_tx->agg.frame_not_ready);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg underrun:",
le32_to_cpu(tx->agg.underrun),
- accum_tx->agg.underrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg bt_prio_kill:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.underrun,
+ delta_tx->agg.underrun, max_tx->agg.underrun);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg bt_prio_kill:",
le32_to_cpu(tx->agg.bt_prio_kill),
- accum_tx->agg.bt_prio_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg rx_ba_rsp_cnt:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.bt_prio_kill,
+ delta_tx->agg.bt_prio_kill,
+ max_tx->agg.bt_prio_kill);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg rx_ba_rsp_cnt:",
le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
- accum_tx->agg.rx_ba_rsp_cnt);
+ accum_tx->agg.rx_ba_rsp_cnt,
+ delta_tx->agg.rx_ba_rsp_cnt,
+ max_tx->agg.rx_ba_rsp_cnt);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
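The per-row format strings removed above are consolidated into three shared strings referenced by this hunk (ucode_stats_header, ucode_stats_format, ucode_stats_short_format). A minimal sketch of how they might be defined at file scope; the definitions sit outside this hunk, so the exact column widths here are an assumption:

	static const char ucode_stats_header[] =
		"%-32s     current  accumulative       delta         max\n";
	static const char ucode_stats_format[] =
		"  %-30s %10u  %10u  %10u  %10u\n";	/* current, accumulative, delta, max */
	static const char ucode_stats_short_format[] =
		"  %-30s %10u\n";			/* counters with only a current value */

Each scnprintf() call then supplies the row label followed by the current, accumulated, delta and maximum-delta values, which is why every counter now needs the delta_tx/max_tx pointers set up at the top of the function.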
@@ -1539,28 +1693,19 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
- int bufsz = sizeof(struct statistics_general) * 4 + 250;
+ int bufsz = sizeof(struct statistics_general) * 10 + 300;
ssize_t ret;
struct statistics_general *general, *accum_general;
- struct statistics_dbg *dbg, *accum_dbg;
- struct statistics_div *div, *accum_div;
+ struct statistics_general *delta_general, *max_general;
+ struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+ struct statistics_div *div, *accum_div, *delta_div, *max_div;
if (!iwl_is_alive(priv))
return -EAGAIN;
- /* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
- ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (ret) {
- IWL_ERR(priv,
- "Error sending statistics request: %zd\n", ret);
- return -EAGAIN;
- }
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate Buffer\n");
@@ -1575,52 +1720,78 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
dbg = &priv->statistics.general.dbg;
div = &priv->statistics.general.div;
accum_general = &priv->accum_statistics.general;
+ delta_general = &priv->delta_statistics.general;
+ max_general = &priv->max_delta.general;
accum_dbg = &priv->accum_statistics.general.dbg;
+ delta_dbg = &priv->delta_statistics.general.dbg;
+ max_dbg = &priv->max_delta.general.dbg;
accum_div = &priv->accum_statistics.general.div;
+ delta_div = &priv->delta_statistics.general.div;
+ max_div = &priv->max_delta.general.div;
pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_General:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "temperature:\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_General:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format,
+ "temperature:",
le32_to_cpu(general->temperature));
- pos += scnprintf(buf + pos, bufsz - pos, "temperature_m:\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format,
+ "temperature_m:",
le32_to_cpu(general->temperature_m));
- pos += scnprintf(buf + pos, bufsz - pos,
- "burst_check:\t\t\t%u\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "burst_check:",
le32_to_cpu(dbg->burst_check),
- accum_dbg->burst_check);
- pos += scnprintf(buf + pos, bufsz - pos,
- "burst_count:\t\t\t%u\t\t\t%u\n",
+ accum_dbg->burst_check,
+ delta_dbg->burst_check, max_dbg->burst_check);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "burst_count:",
le32_to_cpu(dbg->burst_count),
- accum_dbg->burst_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sleep_time:\t\t\t%u\t\t\t%u\n",
+ accum_dbg->burst_count,
+ delta_dbg->burst_count, max_dbg->burst_count);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sleep_time:",
le32_to_cpu(general->sleep_time),
- accum_general->sleep_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- "slots_out:\t\t\t%u\t\t\t%u\n",
+ accum_general->sleep_time,
+ delta_general->sleep_time, max_general->sleep_time);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "slots_out:",
le32_to_cpu(general->slots_out),
- accum_general->slots_out);
- pos += scnprintf(buf + pos, bufsz - pos,
- "slots_idle:\t\t\t%u\t\t\t%u\n",
+ accum_general->slots_out,
+ delta_general->slots_out, max_general->slots_out);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "slots_idle:",
le32_to_cpu(general->slots_idle),
- accum_general->slots_idle);
+ accum_general->slots_idle,
+ delta_general->slots_idle, max_general->slots_idle);
pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
le32_to_cpu(general->ttl_timestamp));
- pos += scnprintf(buf + pos, bufsz - pos, "tx_on_a:\t\t\t%u\t\t\t%u\n",
- le32_to_cpu(div->tx_on_a), accum_div->tx_on_a);
- pos += scnprintf(buf + pos, bufsz - pos, "tx_on_b:\t\t\t%u\t\t\t%u\n",
- le32_to_cpu(div->tx_on_b), accum_div->tx_on_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- "exec_time:\t\t\t%u\t\t\t%u\n",
- le32_to_cpu(div->exec_time), accum_div->exec_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- "probe_time:\t\t\t%u\t\t\t%u\n",
- le32_to_cpu(div->probe_time), accum_div->probe_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- "rx_enable_counter:\t\t%u\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "tx_on_a:",
+ le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+ delta_div->tx_on_a, max_div->tx_on_a);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "tx_on_b:",
+ le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+ delta_div->tx_on_b, max_div->tx_on_b);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "exec_time:",
+ le32_to_cpu(div->exec_time), accum_div->exec_time,
+ delta_div->exec_time, max_div->exec_time);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "probe_time:",
+ le32_to_cpu(div->probe_time), accum_div->probe_time,
+ delta_div->probe_time, max_div->probe_time);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "rx_enable_counter:",
le32_to_cpu(general->rx_enable_counter),
- accum_general->rx_enable_counter);
+ accum_general->rx_enable_counter,
+ delta_general->rx_enable_counter,
+ max_general->rx_enable_counter);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "num_of_sos_states:",
+ le32_to_cpu(general->num_of_sos_states),
+ accum_general->num_of_sos_states,
+ delta_general->num_of_sos_states,
+ max_general->num_of_sos_states);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
@@ -1630,7 +1801,7 @@ static ssize_t iwl_dbgfs_sensitivity_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
int cnt = 0;
char *buf;
@@ -1711,7 +1882,7 @@ static ssize_t iwl_dbgfs_chain_noise_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
int cnt = 0;
char *buf;
@@ -1769,26 +1940,15 @@ static ssize_t iwl_dbgfs_tx_power_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[128];
int pos = 0;
- ssize_t ret;
const size_t bufsz = sizeof(buf);
struct statistics_tx *tx;
if (!iwl_is_alive(priv))
pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
else {
- /* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
- ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (ret) {
- IWL_ERR(priv, "Error sending statistics request: %zd\n",
- ret);
- return -EAGAIN;
- }
tx = &priv->statistics.tx;
if (tx->tx_power.ant_a ||
tx->tx_power.ant_b ||
@@ -1820,7 +1980,7 @@ static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[60];
int pos = 0;
const size_t bufsz = sizeof(buf);
@@ -1937,6 +2097,132 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
return count;
}
+static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char *buf;
+ int pos = 0;
+ ssize_t ret = -EFAULT;
+
+ if (priv->cfg->ops->lib->dump_fh) {
+ ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
+ if (buf) {
+ ret = simple_read_from_buffer(user_buf,
+ count, ppos, buf, pos);
+ kfree(buf);
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char buf[12];
+ const size_t bufsz = sizeof(buf);
+ ssize_t ret;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
+ priv->missed_beacon_threshold);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int missed;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &missed) != 1)
+ return -EINVAL;
+
+ if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
+ missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
+ priv->missed_beacon_threshold =
+ IWL_MISSED_BEACON_THRESHOLD_DEF;
+ else
+ priv->missed_beacon_threshold = missed;
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_internal_scan_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int scan;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &scan) != 1)
+ return -EINVAL;
+
+ iwl_internal_short_hw_scan(priv);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char buf[12];
+ const size_t bufsz = sizeof(buf);
+ ssize_t ret;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
+ priv->cfg->plcp_delta_threshold);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int plcp;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &plcp) != 1)
+ return -EINVAL;
+ if ((plcp <= IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
+ (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
+ priv->cfg->plcp_delta_threshold =
+ IWL_MAX_PLCP_ERR_THRESHOLD_DEF;
+ else
+ priv->cfg->plcp_delta_threshold = plcp;
+ return count;
+}
+
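These handlers are bound to debugfs files through the DEBUGFS_*_FILE_OPS and DEBUGFS_ADD_FILE macros declared earlier in this file; their expansions are not part of this hunk, so the following is only a hedged sketch of the usual pattern (iwl_dbgfs_open_file_generic is assumed):

	#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	static const struct file_operations iwl_dbgfs_##name##_ops = {		\
		.read  = iwl_dbgfs_##name##_read,				\
		.write = iwl_dbgfs_##name##_write,				\
		.open  = iwl_dbgfs_open_file_generic,				\
	};

	#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
		if (!debugfs_create_file(#name, mode, parent, priv,		\
					 &iwl_dbgfs_##name##_ops))		\
			goto err;						\
	} while (0)

With that pattern, DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR) below creates a "plcp_delta" file whose read and write callbacks are the two handlers just added.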
DEBUGFS_READ_FILE_OPS(rx_statistics);
DEBUGFS_READ_FILE_OPS(tx_statistics);
DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
@@ -1953,6 +2239,10 @@ DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
+DEBUGFS_WRITE_FILE_OPS(internal_scan);
+DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
/*
* Create the debugfs files and directories
@@ -1960,71 +2250,73 @@ DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
*/
int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
{
- struct iwl_debugfs *dbgfs;
struct dentry *phyd = priv->hw->wiphy->debugfsdir;
- int ret = 0;
+ struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
- dbgfs = kzalloc(sizeof(struct iwl_debugfs), GFP_KERNEL);
- if (!dbgfs) {
- ret = -ENOMEM;
- goto err;
- }
+ dir_drv = debugfs_create_dir(name, phyd);
+ if (!dir_drv)
+ return -ENOMEM;
+
+ priv->debugfs_dir = dir_drv;
- priv->dbgfs = dbgfs;
- dbgfs->name = name;
- dbgfs->dir_drv = debugfs_create_dir(name, phyd);
- if (!dbgfs->dir_drv || IS_ERR(dbgfs->dir_drv)) {
- ret = -ENOENT;
+ dir_data = debugfs_create_dir("data", dir_drv);
+ if (!dir_data)
+ goto err;
+ dir_rf = debugfs_create_dir("rf", dir_drv);
+ if (!dir_rf)
+ goto err;
+ dir_debug = debugfs_create_dir("debug", dir_drv);
+ if (!dir_debug)
goto err;
- }
- DEBUGFS_ADD_DIR(data, dbgfs->dir_drv);
- DEBUGFS_ADD_DIR(rf, dbgfs->dir_drv);
- DEBUGFS_ADD_DIR(debug, dbgfs->dir_drv);
- DEBUGFS_ADD_FILE(nvm, data, S_IRUSR);
- DEBUGFS_ADD_FILE(sram, data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(log_event, data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(stations, data, S_IRUSR);
- DEBUGFS_ADD_FILE(channels, data, S_IRUSR);
- DEBUGFS_ADD_FILE(status, data, S_IRUSR);
- DEBUGFS_ADD_FILE(interrupt, data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(qos, data, S_IRUSR);
- DEBUGFS_ADD_FILE(led, data, S_IRUSR);
- DEBUGFS_ADD_FILE(sleep_level_override, data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(current_sleep_command, data, S_IRUSR);
- DEBUGFS_ADD_FILE(thermal_throttling, data, S_IRUSR);
- DEBUGFS_ADD_FILE(disable_ht40, data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(rx_statistics, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_statistics, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(traffic_log, debug, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(rx_queue, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_queue, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_power, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(power_save_status, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(clear_ucode_statistics, debug, S_IWUSR);
- DEBUGFS_ADD_FILE(clear_traffic_statistics, debug, S_IWUSR);
- DEBUGFS_ADD_FILE(csr, debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_power, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(csr, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(internal_scan, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
- DEBUGFS_ADD_FILE(ucode_rx_stats, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_tx_stats, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_general_stats, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(sensitivity, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(chain_noise, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_tracing, debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
}
- DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal);
- DEBUGFS_ADD_BOOL(disable_chain_noise, rf,
+ DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, &priv->disable_sens_cal);
+ DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
&priv->disable_chain_noise_cal);
if (((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) ||
((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_3945))
- DEBUGFS_ADD_BOOL(disable_tx_power, rf,
+ DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
&priv->disable_tx_power_cal);
return 0;
err:
- IWL_ERR(priv, "Can't open the debugfs directory\n");
+ IWL_ERR(priv, "Can't create the debugfs directory\n");
iwl_dbgfs_unregister(priv);
- return ret;
+ return -ENOMEM;
}
EXPORT_SYMBOL(iwl_dbgfs_register);
@@ -2034,59 +2326,11 @@ EXPORT_SYMBOL(iwl_dbgfs_register);
*/
void iwl_dbgfs_unregister(struct iwl_priv *priv)
{
- if (!priv->dbgfs)
+ if (!priv->debugfs_dir)
return;
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_sleep_level_override);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_current_sleep_command);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_nvm);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_sram);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_log_event);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_channels);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_status);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_interrupt);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_qos);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_led);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_thermal_throttling);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_disable_ht40);
- DEBUGFS_REMOVE(priv->dbgfs->dir_data);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_rx_statistics);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_statistics);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_traffic_log);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_rx_queue);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_queue);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_power);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_power_save_status);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_clear_ucode_statistics);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_clear_traffic_statistics);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_csr);
- if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_ucode_rx_stats);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_ucode_tx_stats);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_ucode_general_stats);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_sensitivity);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_chain_noise);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_ucode_tracing);
- }
- DEBUGFS_REMOVE(priv->dbgfs->dir_debug);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_chain_noise);
- if (((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) ||
- ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_3945))
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_tx_power);
- DEBUGFS_REMOVE(priv->dbgfs->dir_rf);
- DEBUGFS_REMOVE(priv->dbgfs->dir_drv);
- kfree(priv->dbgfs);
- priv->dbgfs = NULL;
+ debugfs_remove_recursive(priv->debugfs_dir);
+ priv->debugfs_dir = NULL;
}
EXPORT_SYMBOL(iwl_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 42f9b17327c3..55dc5a866542 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -712,7 +712,7 @@ extern void iwl_txq_ctx_stop(struct iwl_priv *priv);
extern int iwl_queue_space(const struct iwl_queue *q);
static inline int iwl_queue_used(const struct iwl_queue *q, int i)
{
- return q->write_ptr > q->read_ptr ?
+ return q->write_ptr >= q->read_ptr ?
(i >= q->read_ptr && i < q->write_ptr) :
!(i < q->read_ptr && i >= q->write_ptr);
}
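The queue-occupancy test changes '>' to '>=' so that an empty queue, where write_ptr == read_ptr, falls into the non-wrapped branch and reports every index as unused (with '>' the wrapped branch would claim every index was in use). A standalone toy program, not driver code, exercising the same predicate:

	#include <stdio.h>
	#include <stdbool.h>

	struct toy_queue { int read_ptr, write_ptr; };

	/* same predicate as iwl_queue_used(), on a toy queue */
	static bool toy_queue_used(const struct toy_queue *q, int i)
	{
		return q->write_ptr >= q->read_ptr ?
			(i >= q->read_ptr && i < q->write_ptr) :
			!(i < q->read_ptr && i >= q->write_ptr);
	}

	int main(void)
	{
		struct toy_queue empty   = { .read_ptr = 3, .write_ptr = 3 };
		struct toy_queue wrapped = { .read_ptr = 6, .write_ptr = 2 };

		printf("%d\n", toy_queue_used(&empty, 3));   /* 0: empty queue, nothing used */
		printf("%d\n", toy_queue_used(&wrapped, 7)); /* 1: indices 6..7 and 0..1 used */
		return 0;
	}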
@@ -1011,6 +1011,30 @@ struct iwl_event_log {
int wraps_more_count;
};
+/*
+ * host interrupt timeout value
+ * used when setting the interrupt coalescing timer
+ * the CSR_INT_COALESCING is an 8-bit register in 32-usec units
+ *
+ * default interrupt coalescing timer is 64 x 32 = 2048 usecs
+ * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
+ */
+#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
+#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
+#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
+#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
+#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
+#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
+
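Because CSR_INT_COALESCING counts in 32-usec units, the defaults above work out to 0x40 * 32 = 2048 usecs and 0x10 * 32 = 512 usecs, matching the comment. A purely illustrative helper (not part of the patch) for the conversion:

	/* Illustrative only: convert a CSR_INT_COALESCING value, expressed in
	 * 32-usec units, into microseconds.
	 */
	static inline unsigned int iwl_coalescing_units_to_usecs(unsigned int reg_val)
	{
		return reg_val * 32;	/* 0x40 -> 2048, 0x10 -> 512 */
	}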
+/*
+ * This is the threshold value of the plcp error rate per 100 mSecs. It is
+ * used to set and check the validity of plcp_delta.
+ */
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (0)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
+#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
+
struct iwl_priv {
/* ieee device used by generic ieee processing code */
@@ -1031,13 +1055,16 @@ struct iwl_priv {
struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
-#if defined(CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT) || defined(CONFIG_IWL3945_SPECTRUM_MEASUREMENT)
/* spectrum measurement report caching */
struct iwl_spectrum_notification measure_report;
u8 measurement_status;
-#endif
+
/* ucode beacon time */
u32 ucode_beacon_time;
+ int missed_beacon_threshold;
+
+ /* storing the jiffies when the plcp error rate is received */
+ unsigned long plcp_jiffies;
/* we allocate array of iwl4965_channel_info for NIC's valid channels.
* Access via channel # using indirect index array */
@@ -1056,14 +1083,15 @@ struct iwl_priv {
struct iwl_calib_result calib_results[IWL_CALIB_MAX];
/* Scan related variables */
- unsigned long last_scan_jiffies;
unsigned long next_scan_jiffies;
unsigned long scan_start;
unsigned long scan_pass_start;
unsigned long scan_start_tsf;
+ unsigned long last_internal_scan_jiffies;
void *scan;
int scan_bands;
struct cfg80211_scan_request *scan_request;
+ bool is_internal_short_scan;
u8 scan_tx_ant[IEEE80211_NUM_BANDS];
u8 mgmt_tx_ant;
@@ -1162,6 +1190,8 @@ struct iwl_priv {
struct iwl_notif_statistics statistics;
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_notif_statistics accum_statistics;
+ struct iwl_notif_statistics delta_statistics;
+ struct iwl_notif_statistics max_delta;
#endif
/* context information */
@@ -1234,15 +1264,10 @@ struct iwl_priv {
struct workqueue_struct *workqueue;
- struct work_struct up;
struct work_struct restart;
- struct work_struct calibrated_work;
struct work_struct scan_completed;
struct work_struct rx_replenish;
struct work_struct abort_scan;
- struct work_struct update_link_led;
- struct work_struct auth_work;
- struct work_struct report_work;
struct work_struct request_scan;
struct work_struct beacon_update;
struct work_struct tt_work;
@@ -1278,7 +1303,8 @@ struct iwl_priv {
u16 rx_traffic_idx;
u8 *tx_traffic;
u8 *rx_traffic;
- struct iwl_debugfs *dbgfs;
+ struct dentry *debugfs_dir;
+ u32 dbgfs_sram_offset, dbgfs_sram_len;
#endif /* CONFIG_IWLWIFI_DEBUGFS */
#endif /* CONFIG_IWLWIFI_DEBUG */
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index bf46308b17fa..36580d8d8b8d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -1,3 +1,29 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
#include <linux/module.h>
/* sparse doesn't like tracepoint macros */
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 0819f990be6c..ff4d012ce260 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -1,3 +1,29 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
#define __IWLWIFI_DEVICE_TRACE
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 4a30969689ff..fd37152abae3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 0cd9c02ee044..4e1ba824dc50 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 65fa8a69fd5a..113c3669b9ce 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -379,6 +379,25 @@
#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
+/**
+ * Bit fields for TSSR (Tx Shared Status & Control) error status register:
+ * 31: Indicates an address error when accessing internal memory
+ * uCode/driver must write "1" in order to clear this flag
+ * 30: Indicates that Host did not send the expected number of dwords to FH
+ * uCode/driver must write "1" in order to clear this flag
+ * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
+ * command was received from the scheduler while the TRB was already full
+ * with previous command
+ * uCode/driver must write "1" in order to clear this flag
+ * 7-0: Each status bit indicates a channel's TxCredit error. When an error
+ * bit is set, it indicates that the FH has received a full indication
+ * from the RTC TxFIFO and the current value of the TxCredit counter was
+ * not equal to zero. This means that the credit mechanism was not
+ * synchronized to the TxFIFO status
+ * uCode/driver must write "1" in order to clear this flag
+ */
+#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
+
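A hedged sketch, not part of the patch, of masks matching the bit layout documented above; the names are illustrative:

	#define TSSR_ERR_ADDR		(1U << 31)	/* internal memory address error           */
	#define TSSR_ERR_DWORD_COUNT	(1U << 30)	/* host sent fewer dwords than FH expected */
	#define TSSR_ERR_ACTDMA		(0xFFU << 9)	/* bits 16..9: ActDMA while TRB full       */
	#define TSSR_ERR_TXCREDIT	(0xFFU << 0)	/* bits 7..0: per-channel TxCredit errors  */

	static inline unsigned int iwl_tssr_txcredit_errors(unsigned int err)
	{
		return err & TSSR_ERR_TXCREDIT;	/* one bit per Tx DMA channel */
	}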
#define FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) ((1 << (_chnl)) << 24)
#define FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl) ((1 << (_chnl)) << 16)
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 30e9ea6d54ec..86783c27d97c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -58,7 +58,6 @@ const char *get_cmd_string(u8 cmd)
IWL_CMD(COEX_PRIORITY_TABLE_CMD);
IWL_CMD(COEX_MEDIUM_NOTIFICATION);
IWL_CMD(COEX_EVENT_CMD);
- IWL_CMD(RADAR_NOTIFICATION);
IWL_CMD(REPLY_QUIET_CMD);
IWL_CMD(REPLY_CHANNEL_SWITCH);
IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index bd0b12efb5c7..45af5bbc1c56 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index e552d4c4bdbe..c719baf2585a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 46c7a95b88f0..a6f9c918aabc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index f47f053f02ea..49a70baa3fb6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 8ccc0bb1d9ed..1a1a9f081cc7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -303,13 +303,12 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
sizeof(struct iwl_powertable_cmd), cmd);
}
-
+/* priv->mutex must be held */
int iwl_power_update_mode(struct iwl_priv *priv, bool force)
{
int ret = 0;
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- bool enabled = (priv->iw_mode == NL80211_IFTYPE_STATION) &&
- (priv->hw->conf.flags & IEEE80211_CONF_PS);
+ bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS;
bool update_chains;
struct iwl_powertable_cmd cmd;
int dtimper;
@@ -319,7 +318,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
if (priv->vif)
- dtimper = priv->vif->bss_conf.dtim_period;
+ dtimper = priv->hw->conf.ps_dtim_period;
else
dtimper = 1;
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index 310c32e8f698..5db91c10dcc8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 6d95832db06d..d2d2a9174900 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 6f36b6e79f5e..5df66382d922 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -473,8 +473,8 @@ int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
(rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
(rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
- /* Set interrupt coalescing timer to 64 x 32 = 2048 usecs */
- iwl_write8(priv, CSR_INT_COALESCING, 0x40);
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
return 0;
}
@@ -499,9 +499,10 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_missed_beacon_notif *missed_beacon;
missed_beacon = &pkt->u.missed_beacon;
- if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
+ if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+ priv->missed_beacon_threshold) {
IWL_DEBUG_CALIB(priv, "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
- le32_to_cpu(missed_beacon->consequtive_missed_beacons),
+ le32_to_cpu(missed_beacon->consecutive_missed_beacons),
le32_to_cpu(missed_beacon->total_missed_becons),
le32_to_cpu(missed_beacon->num_recvd_beacons),
le32_to_cpu(missed_beacon->num_expected_beacons));
@@ -511,6 +512,24 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_rx_missed_beacon_notif);
+void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
+
+ if (!report->state) {
+ IWL_DEBUG_11H(priv,
+ "Spectrum Measure Notification: Start\n");
+ return;
+ }
+
+ memcpy(&priv->measure_report, report, sizeof(*report));
+ priv->measurement_status |= MEASUREMENT_READY;
+}
+EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);
+
+
/* Calculate noise level, based on measurements during network silence just
* before arriving beacon. This measurement can be done only if we know
@@ -564,15 +583,24 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
int i;
__le32 *prev_stats;
u32 *accum_stats;
+ u32 *delta, *max_delta;
prev_stats = (__le32 *)&priv->statistics;
accum_stats = (u32 *)&priv->accum_statistics;
+ delta = (u32 *)&priv->delta_statistics;
+ max_delta = (u32 *)&priv->max_delta;
for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
- i += sizeof(__le32), stats++, prev_stats++, accum_stats++)
- if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats))
- *accum_stats += (le32_to_cpu(*stats) -
+ i += sizeof(__le32), stats++, prev_stats++, delta++,
+ max_delta++, accum_stats++) {
+ if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+ *delta = (le32_to_cpu(*stats) -
le32_to_cpu(*prev_stats));
+ *accum_stats += *delta;
+ if (*delta > *max_delta)
+ *max_delta = *delta;
+ }
+ }
/* reset accumulative statistics for "no-counter" type statistics */
priv->accum_statistics.general.temperature =
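For example, if a counter reads 130 in the new statistics notification and the stored copy from the previous notification reads 100, delta becomes 30, the accumulated value grows by 30, and max_delta is raised to 30 if that is the largest delta observed so far; counters whose new value is not greater than the previous one (for instance after a uCode counter reset) are skipped for that interval.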
@@ -592,11 +620,15 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
#define REG_RECALIB_PERIOD (60)
+#define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n"
void iwl_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
int change;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ int combined_plcp_delta;
+ unsigned int plcp_msec;
+ unsigned long plcp_received_jiffies;
IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
(int)sizeof(priv->statistics),
@@ -611,6 +643,56 @@ void iwl_rx_statistics(struct iwl_priv *priv,
#ifdef CONFIG_IWLWIFI_DEBUG
iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
#endif
+ /*
+ * check for plcp_err and trigger radio reset if it exceeds
+ * the plcp error threshold plcp_delta.
+ */
+ plcp_received_jiffies = jiffies;
+ plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
+ (long) priv->plcp_jiffies);
+ priv->plcp_jiffies = plcp_received_jiffies;
+ /*
+ * check to make sure plcp_msec is not 0 to prevent division
+ * by zero.
+ */
+ if (plcp_msec) {
+ combined_plcp_delta =
+ (le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err) -
+ le32_to_cpu(priv->statistics.rx.ofdm.plcp_err)) +
+ (le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err) -
+ le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err));
+
+ if ((combined_plcp_delta > 0) &&
+ ((combined_plcp_delta * 100) / plcp_msec) >
+ priv->cfg->plcp_delta_threshold) {
+ /*
+ * if plcp_err exceeds the threshold, the following
+ * data is printed in csv format:
+ * Text: plcp_err exceeded %d,
+ * Received ofdm.plcp_err,
+ * Current ofdm.plcp_err,
+ * Received ofdm_ht.plcp_err,
+ * Current ofdm_ht.plcp_err,
+ * combined_plcp_delta,
+ * plcp_msec
+ */
+ IWL_DEBUG_RADIO(priv, PLCP_MSG,
+ priv->cfg->plcp_delta_threshold,
+ le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
+ le32_to_cpu(priv->statistics.rx.ofdm.plcp_err),
+ le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
+ le32_to_cpu(
+ priv->statistics.rx.ofdm_ht.plcp_err),
+ combined_plcp_delta, plcp_msec);
+
+ /*
+ * Reset the RF radio due to the high plcp
+ * error rate
+ */
+ iwl_force_rf_reset(priv);
+ }
+ }
+
memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
set_bit(STATUS_STATISTICS, &priv->status);
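The reset condition normalizes the error delta to a per-100-ms rate before comparing it with plcp_delta_threshold. A worked example with made-up numbers:

	/* Illustrative values only */
	unsigned int combined_plcp_delta = 120;	/* new ofdm + ofdm_ht plcp errors        */
	unsigned int plcp_msec = 200;		/* msecs since the previous statistics   */
	unsigned int rate = combined_plcp_delta * 100 / plcp_msec;	/* = 60 per 100 ms */
	/* 60 exceeds IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50), so iwl_force_rf_reset() fires */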
@@ -638,11 +720,13 @@ void iwl_reply_statistics(struct iwl_priv *priv,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
- memset(&priv->statistics, 0,
- sizeof(struct iwl_notif_statistics));
#ifdef CONFIG_IWLWIFI_DEBUG
memset(&priv->accum_statistics, 0,
sizeof(struct iwl_notif_statistics));
+ memset(&priv->delta_statistics, 0,
+ sizeof(struct iwl_notif_statistics));
+ memset(&priv->max_delta, 0,
+ sizeof(struct iwl_notif_statistics));
#endif
IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index fa1c89ba6459..08faafae8497 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -192,19 +192,17 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
IWL_DEBUG_SCAN(priv, "Scan ch.res: "
"%d [802.11%s] "
"(TSF: 0x%08X:%08X) - %d "
- "elapsed=%lu usec (%dms since last)\n",
+ "elapsed=%lu usec\n",
notif->channel,
notif->band ? "bg" : "a",
le32_to_cpu(notif->tsf_high),
le32_to_cpu(notif->tsf_low),
le32_to_cpu(notif->statistics[0]),
- le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
- jiffies_to_msecs(elapsed_jiffies
- (priv->last_scan_jiffies, jiffies)));
+ le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
#endif
- priv->last_scan_jiffies = jiffies;
- priv->next_scan_jiffies = 0;
+ if (!priv->is_internal_short_scan)
+ priv->next_scan_jiffies = 0;
}
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
@@ -250,8 +248,11 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
goto reschedule;
}
- priv->last_scan_jiffies = jiffies;
- priv->next_scan_jiffies = 0;
+ if (!priv->is_internal_short_scan)
+ priv->next_scan_jiffies = 0;
+ else
+ priv->last_internal_scan_jiffies = jiffies;
+
IWL_DEBUG_INFO(priv, "Setting scan to off\n");
clear_bit(STATUS_SCANNING, &priv->status);
@@ -314,6 +315,72 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_get_passive_dwell_time);
+static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
+ enum ieee80211_band band,
+ struct iwl_scan_channel *scan_ch)
+{
+ const struct ieee80211_supported_band *sband;
+ const struct iwl_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int i, added = 0;
+ u16 channel = 0;
+
+ sband = iwl_get_hw_mode(priv, band);
+ if (!sband) {
+ IWL_ERR(priv, "invalid band\n");
+ return added;
+ }
+
+ active_dwell = iwl_get_active_dwell_time(priv, band, 0);
+ passive_dwell = iwl_get_passive_dwell_time(priv, band);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ /* only scan a single channel, good enough to reset the RF */
+ /* pick the first valid not in-use channel */
+ if (band == IEEE80211_BAND_5GHZ) {
+ for (i = 14; i < priv->channel_count; i++) {
+ if (priv->channel_info[i].channel !=
+ le16_to_cpu(priv->staging_rxon.channel)) {
+ channel = priv->channel_info[i].channel;
+ ch_info = iwl_get_channel_info(priv,
+ band, channel);
+ if (is_channel_valid(ch_info))
+ break;
+ }
+ }
+ } else {
+ for (i = 0; i < 14; i++) {
+ if (priv->channel_info[i].channel !=
+ le16_to_cpu(priv->staging_rxon.channel)) {
+ channel =
+ priv->channel_info[i].channel;
+ ch_info = iwl_get_channel_info(priv,
+ band, channel);
+ if (is_channel_valid(ch_info))
+ break;
+ }
+ }
+ }
+ if (channel) {
+ scan_ch->channel = cpu_to_le16(channel);
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+ added++;
+ } else
+ IWL_ERR(priv, "no valid channel found\n");
+ return added;
+}
+
static int iwl_get_channels_for_scan(struct iwl_priv *priv,
enum ieee80211_band band,
u8 is_active, u8 n_probes,
@@ -421,6 +488,7 @@ static int iwl_scan_initiate(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "Starting scan...\n");
set_bit(STATUS_SCANNING, &priv->status);
+ priv->is_internal_short_scan = false;
priv->scan_start = jiffies;
priv->scan_pass_start = priv->scan_start;
@@ -461,15 +529,6 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
goto out_unlock;
}
- /* if we just finished scan ask for delay */
- if (iwl_is_associated(priv) && priv->last_scan_jiffies &&
- time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN, jiffies)) {
- IWL_DEBUG_SCAN(priv, "scan rejected: within previous scan period\n");
- queue_work(priv->workqueue, &priv->scan_completed);
- ret = 0;
- goto out_unlock;
- }
-
priv->scan_bands = 0;
for (i = 0; i < req->n_channels; i++)
priv->scan_bands |= BIT(req->channels[i]->band);
@@ -488,6 +547,54 @@ out_unlock:
}
EXPORT_SYMBOL(iwl_mac_hw_scan);
+/*
+ * internal short scan: this function should only be called while associated.
+ * It will reset and tune the radio to prevent possible RF related problem
+ */
+#define IWL_DELAY_NEXT_INTERNAL_SCAN (HZ*1)
+
+int iwl_internal_short_hw_scan(struct iwl_priv *priv)
+{
+ int ret = 0;
+
+ if (!iwl_is_ready_rf(priv)) {
+ ret = -EIO;
+ IWL_DEBUG_SCAN(priv, "not ready or exit pending\n");
+ goto out;
+ }
+ if (test_bit(STATUS_SCANNING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+ if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+ if (priv->last_internal_scan_jiffies &&
+ time_after(priv->last_internal_scan_jiffies +
+ IWL_DELAY_NEXT_INTERNAL_SCAN, jiffies)) {
+ IWL_DEBUG_SCAN(priv, "internal scan rejected\n");
+ goto out;
+ }
+
+ priv->scan_bands = 0;
+ if (priv->band == IEEE80211_BAND_5GHZ)
+ priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
+ else
+ priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
+
+ IWL_DEBUG_SCAN(priv, "Start internal short scan...\n");
+ set_bit(STATUS_SCANNING, &priv->status);
+ priv->is_internal_short_scan = true;
+ queue_work(priv->workqueue, &priv->request_scan);
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(iwl_internal_short_hw_scan);
+
#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
void iwl_bg_scan_check(struct work_struct *data)
@@ -551,7 +658,8 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
if (WARN_ON(left < ie_len))
return len;
- memcpy(pos, ies, ie_len);
+ if (ies)
+ memcpy(pos, ies, ie_len);
len += ie_len;
left -= ie_len;
@@ -654,7 +762,6 @@ static void iwl_bg_request_scan(struct work_struct *data)
unsigned long flags;
IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
-
spin_lock_irqsave(&priv->lock, flags);
interval = priv->beacon_int;
spin_unlock_irqrestore(&priv->lock, flags);
@@ -672,7 +779,9 @@ static void iwl_bg_request_scan(struct work_struct *data)
scan_suspend_time, interval);
}
- if (priv->scan_request->n_ssids) {
+ if (priv->is_internal_short_scan) {
+ IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
+ } else if (priv->scan_request->n_ssids) {
int i, p = 0;
IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
for (i = 0; i < priv->scan_request->n_ssids; i++) {
@@ -753,24 +862,38 @@ static void iwl_bg_request_scan(struct work_struct *data)
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
scan->rx_chain = cpu_to_le16(rx_chain);
- cmd_len = iwl_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- priv->scan_request->ie,
- priv->scan_request->ie_len,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ if (!priv->is_internal_short_scan) {
+ cmd_len = iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ priv->scan_request->ie,
+ priv->scan_request->ie_len,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ } else {
+ cmd_len = iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ NULL, 0,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ }
scan->tx_cmd.len = cpu_to_le16(cmd_len);
-
if (iwl_is_monitor_mode(priv))
scan->filter_flags = RXON_FILTER_PROMISC_MSK;
scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
RXON_FILTER_BCON_AWARE_MSK);
- scan->channel_count =
- iwl_get_channels_for_scan(priv, band, is_active, n_probes,
- (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
-
+ if (priv->is_internal_short_scan) {
+ scan->channel_count =
+ iwl_get_single_channel_for_scan(priv, band,
+ (void *)&scan->data[le16_to_cpu(
+ scan->tx_cmd.len)]);
+ } else {
+ scan->channel_count =
+ iwl_get_channels_for_scan(priv, band,
+ is_active, n_probes,
+ (void *)&scan->data[le16_to_cpu(
+ scan->tx_cmd.len)]);
+ }
if (scan->channel_count == 0) {
IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
goto done;
@@ -831,7 +954,12 @@ void iwl_bg_scan_completed(struct work_struct *work)
cancel_delayed_work(&priv->scan_check);
- ieee80211_scan_completed(priv->hw, false);
+ if (!priv->is_internal_short_scan)
+ ieee80211_scan_completed(priv->hw, false);
+ else {
+ priv->is_internal_short_scan = false;
+ IWL_DEBUG_SCAN(priv, "internal short scan completed\n");
+ }
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.c b/drivers/net/wireless/iwlwifi/iwl-spectrum.c
deleted file mode 100644
index 1ea5cd345fe8..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/wireless.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-spectrum.h"
-
-#define BEACON_TIME_MASK_LOW 0x00FFFFFF
-#define BEACON_TIME_MASK_HIGH 0xFF000000
-#define TIME_UNIT 1024
-
-/*
- * extended beacon time format
- * time in usec will be changed into a 32-bit value in 8:24 format
- * the high 1 byte is the beacon counts
- * the lower 3 bytes is the time in usec within one beacon interval
- */
-
-/* TOOD: was used in sysfs debug interface need to add to mac */
-#if 0
-static u32 iwl_usecs_to_beacons(u32 usec, u32 beacon_interval)
-{
- u32 quot;
- u32 rem;
- u32 interval = beacon_interval * 1024;
-
- if (!interval || !usec)
- return 0;
-
- quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
- rem = (usec % interval) & BEACON_TIME_MASK_LOW;
-
- return (quot << 24) + rem;
-}
-
-/* base is usually what we get from ucode with each received frame,
- * the same as HW timer counter counting down
- */
-
-static __le32 iwl_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
-{
- u32 base_low = base & BEACON_TIME_MASK_LOW;
- u32 addon_low = addon & BEACON_TIME_MASK_LOW;
- u32 interval = beacon_interval * TIME_UNIT;
- u32 res = (base & BEACON_TIME_MASK_HIGH) +
- (addon & BEACON_TIME_MASK_HIGH);
-
- if (base_low > addon_low)
- res += base_low - addon_low;
- else if (base_low < addon_low) {
- res += interval + base_low - addon_low;
- res += (1 << 24);
- } else
- res += (1 << 24);
-
- return cpu_to_le32(res);
-}
-static int iwl_get_measurement(struct iwl_priv *priv,
- struct ieee80211_measurement_params *params,
- u8 type)
-{
- struct iwl4965_spectrum_cmd spectrum;
- struct iwl_rx_packet *res;
- struct iwl_host_cmd cmd = {
- .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
- .data = (void *)&spectrum,
- .meta.flags = CMD_WANT_SKB,
- };
- u32 add_time = le64_to_cpu(params->start_time);
- int rc;
- int spectrum_resp_status;
- int duration = le16_to_cpu(params->duration);
-
- if (iwl_is_associated(priv))
- add_time =
- iwl_usecs_to_beacons(
- le64_to_cpu(params->start_time) - priv->last_tsf,
- le16_to_cpu(priv->rxon_timing.beacon_interval));
-
- memset(&spectrum, 0, sizeof(spectrum));
-
- spectrum.channel_count = cpu_to_le16(1);
- spectrum.flags =
- RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
- spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
- cmd.len = sizeof(spectrum);
- spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
-
- if (iwl_is_associated(priv))
- spectrum.start_time =
- iwl_add_beacon_time(priv->last_beacon_time,
- add_time,
- le16_to_cpu(priv->rxon_timing.beacon_interval));
- else
- spectrum.start_time = 0;
-
- spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
- spectrum.channels[0].channel = params->channel;
- spectrum.channels[0].type = type;
- if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
- spectrum.flags |= RXON_FLG_BAND_24G_MSK |
- RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
-
- rc = iwl_send_cmd_sync(priv, &cmd);
- if (rc)
- return rc;
-
- res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
- if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
- rc = -EIO;
- }
-
- spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
- switch (spectrum_resp_status) {
- case 0: /* Command will be handled */
- if (res->u.spectrum.id != 0xff) {
- IWL_DEBUG_INFO(priv,
- "Replaced existing measurement: %d\n",
- res->u.spectrum.id);
- priv->measurement_status &= ~MEASUREMENT_READY;
- }
- priv->measurement_status |= MEASUREMENT_ACTIVE;
- rc = 0;
- break;
-
- case 1: /* Command will not be handled */
- rc = -EAGAIN;
- break;
- }
-
- dev_kfree_skb_any(cmd.meta.u.skb);
-
- return rc;
-}
-#endif
-
-static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
-
- if (!report->state) {
- IWL_DEBUG_11H(priv,
- "Spectrum Measure Notification: Start\n");
- return;
- }
-
- memcpy(&priv->measure_report, report, sizeof(*report));
- priv->measurement_status |= MEASUREMENT_READY;
-}
-
-void iwl_setup_spectrum_handlers(struct iwl_priv *priv)
-{
- priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
- iwl_rx_spectrum_measure_notif;
-}
-EXPORT_SYMBOL(iwl_setup_spectrum_handlers);
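
The deleted iwl-spectrum.c above documents the "extended beacon time" encoding used by iwl_usecs_to_beacons() and iwl_add_beacon_time(): a 32-bit value in 8:24 format, with the high byte counting whole beacons and the low 24 bits holding the remaining microseconds within one beacon interval. A minimal standalone sketch of that packing (illustration only, not part of the patch) follows.

#include <stdint.h>
#include <stdio.h>

#define BEACON_TIME_MASK_LOW	0x00FFFFFF	/* low 24 bits: usec within the interval */
#define BEACON_TIME_MASK_HIGH	0xFF000000	/* high 8 bits: number of whole beacons */

/* Pack a microsecond offset into the 8:24 extended beacon time format,
 * mirroring the arithmetic of the removed iwl_usecs_to_beacons(). */
static uint32_t usecs_to_beacon_time(uint32_t usec, uint32_t interval_usec)
{
	uint32_t quot, rem;

	if (!interval_usec || !usec)
		return 0;

	quot = (usec / interval_usec) & (BEACON_TIME_MASK_HIGH >> 24);
	rem  = (usec % interval_usec) & BEACON_TIME_MASK_LOW;

	return (quot << 24) | rem;
}

int main(void)
{
	/* A 100 TU beacon interval is 100 * 1024 = 102400 usec; 250000 usec
	 * is 2 whole beacons plus 45200 usec into the third interval. */
	uint32_t v = usecs_to_beacon_time(250000, 100 * 1024);

	printf("beacons=%u usec=%u\n",
	       (unsigned)(v >> 24), (unsigned)(v & BEACON_TIME_MASK_LOW));
	return 0;
}

Run as-is this prints beacons=2 usec=45200, which is exactly the split the 8:24 format is meant to express.
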
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.h b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
index a77c1e619062..af6babee2891 100644
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.h
+++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ieee80211 subsystem header files.
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index cde09a890b73..4a6686fa6b36 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -80,46 +80,103 @@ int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
}
EXPORT_SYMBOL(iwl_get_ra_sta_id);
+/* priv->sta_lock must be held */
static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
- IWL_ERR(priv, "ACTIVATE a non DRIVER active station %d\n",
- sta_id);
-
- priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
- IWL_DEBUG_ASSOC(priv, "Added STA to Ucode: %pM\n",
- priv->stations[sta_id].sta.sta.addr);
+ IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u addr %pM\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
+ IWL_DEBUG_ASSOC(priv,
+ "STA id %u addr %pM already present in uCode (according to driver)\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+ } else {
+ priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
+ IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+ }
}
-static void iwl_add_sta_callback(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt)
+static void iwl_process_add_sta_resp(struct iwl_priv *priv,
+ struct iwl_addsta_cmd *addsta,
+ struct iwl_rx_packet *pkt,
+ bool sync)
{
- struct iwl_addsta_cmd *addsta =
- (struct iwl_addsta_cmd *)cmd->cmd.payload;
u8 sta_id = addsta->sta.sta_id;
+ unsigned long flags;
if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
- pkt->hdr.flags);
+ pkt->hdr.flags);
return;
}
+ IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
+ sta_id);
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+
switch (pkt->u.add_sta.status) {
case ADD_STA_SUCCESS_MSK:
+ IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
iwl_sta_ucode_activate(priv, sta_id);
- /* fall through */
+ break;
+ case ADD_STA_NO_ROOM_IN_TABLE:
+ IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
+ sta_id);
+ break;
+ case ADD_STA_NO_BLOCK_ACK_RESOURCE:
+ IWL_ERR(priv, "Adding station %d failed, no block ack resource.\n",
+ sta_id);
+ break;
+ case ADD_STA_MODIFY_NON_EXIST_STA:
+ IWL_ERR(priv, "Attempting to modify non-existing station %d \n",
+ sta_id);
+ break;
default:
- IWL_DEBUG_HC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
- pkt->u.add_sta.status);
+ IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
+ pkt->u.add_sta.status);
break;
}
+
+ IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
+ priv->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+
+ /*
+ * XXX: The MAC address in the command buffer is often changed from
+ * the original sent to the device. That is, the MAC address
+ * written to the command buffer often is not the same MAC address
+ * read from the command buffer when the command returns. This
+ * issue has not yet been resolved and this debugging is left to
+ * observe the problem.
+ */
+ IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
+ priv->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
+ addsta->sta.addr);
+
+ /*
+ * Determine whether we wanted to modify or add a station; if adding
+ * a station succeeded, there is some more initialization left to do
+ * when using station notification (TODO).
+ */
+
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+
+static void iwl_add_sta_callback(struct iwl_priv *priv,
+ struct iwl_device_cmd *cmd,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_addsta_cmd *addsta =
+ (struct iwl_addsta_cmd *)cmd->cmd.payload;
+
+ iwl_process_add_sta_resp(priv, addsta, pkt, false);
+
}
int iwl_send_add_sta(struct iwl_priv *priv,
@@ -145,24 +202,9 @@ int iwl_send_add_sta(struct iwl_priv *priv,
if (ret || (flags & CMD_ASYNC))
return ret;
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
- pkt->hdr.flags);
- ret = -EIO;
- }
-
if (ret == 0) {
- switch (pkt->u.add_sta.status) {
- case ADD_STA_SUCCESS_MSK:
- iwl_sta_ucode_activate(priv, sta->sta.sta_id);
- IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
- break;
- default:
- ret = -EIO;
- IWL_WARN(priv, "REPLY_ADD_STA failed\n");
- break;
- }
+ pkt = (struct iwl_rx_packet *)cmd.reply_page;
+ iwl_process_add_sta_resp(priv, sta, pkt, true);
}
iwl_free_pages(priv, cmd.reply_page);
@@ -297,7 +339,7 @@ u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
}
EXPORT_SYMBOL(iwl_add_station);
-static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const char *addr)
+static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const u8 *addr)
{
unsigned long flags;
u8 sta_id = iwl_find_station(priv, addr);
@@ -324,7 +366,7 @@ static void iwl_remove_sta_callback(struct iwl_priv *priv,
{
struct iwl_rem_sta_cmd *rm_sta =
(struct iwl_rem_sta_cmd *)cmd->cmd.payload;
- const char *addr = rm_sta->addr;
+ const u8 *addr = rm_sta->addr;
if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
@@ -1003,24 +1045,19 @@ int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
struct ieee80211_sta_ht_cap *cur_ht_config = NULL;
u8 sta_id;
- /* Add station to device's station table */
-
/*
- * XXX: This check is definitely not correct, if we're an AP
- * it'll always be false which is not what we want, but
- * it doesn't look like iwlagn is prepared to be an HT
- * AP anyway.
+ * Set HT capabilities. It is ok to set this struct even if not using
+ * HT config: the priv->current_ht_config.is_ht flag will just be false
*/
- if (priv->current_ht_config.is_ht) {
- rcu_read_lock();
- sta = ieee80211_find_sta(priv->vif, addr);
- if (sta) {
- memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config));
- cur_ht_config = &ht_config;
- }
- rcu_read_unlock();
+ rcu_read_lock();
+ sta = ieee80211_find_sta(priv->vif, addr);
+ if (sta) {
+ memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config));
+ cur_ht_config = &ht_config;
}
+ rcu_read_unlock();
+ /* Add station to device's station table */
sta_id = iwl_add_station(priv, addr, is_ap, CMD_SYNC, cur_ht_config);
/* Set up default rate scaling table in device's station table */
@@ -1085,6 +1122,7 @@ static void iwl_sta_init_bcast_lq(struct iwl_priv *priv)
*/
void iwl_add_bcast_station(struct iwl_priv *priv)
{
+ IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n");
iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
/* Set up default rate scaling table in device's station table */
@@ -1093,6 +1131,16 @@ void iwl_add_bcast_station(struct iwl_priv *priv)
EXPORT_SYMBOL(iwl_add_bcast_station);
/**
+ * iwl3945_add_bcast_station - add broadcast station into station table.
+ */
+void iwl3945_add_bcast_station(struct iwl_priv *priv)
+{
+ IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n");
+ iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
+}
+EXPORT_SYMBOL(iwl3945_add_bcast_station);
+
+/**
* iwl_get_sta_id - Find station's index within station table
*
* If new IBSS station, create new entry in station table
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 8d052de2d405..2dc35fe28f56 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -53,6 +53,7 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
void iwl_add_bcast_station(struct iwl_priv *priv);
+void iwl3945_add_bcast_station(struct iwl_priv *priv);
int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
void iwl_clear_stations_table(struct iwl_priv *priv);
int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 87ce2bd292c7..d365d13e3291 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 10b0aa8024c4..119da54116de 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -56,6 +56,7 @@
#include "iwl-helpers.h"
#include "iwl-core.h"
#include "iwl-dev.h"
+#include "iwl-spectrum.h"
/*
* module name, copyright, version, etc.
@@ -70,14 +71,13 @@
#define VD
#endif
-#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
-#define VS "s"
-#else
-#define VS
-#endif
-
-#define DRV_VERSION IWLWIFI_VERSION VD VS
-#define DRV_COPYRIGHT "Copyright(c) 2003-2009 Intel Corporation"
+/*
+ * add "s" to indicate spectrum measurement included.
+ * we add it here to be consistent with previous releases in which
+ * this was configurable.
+ */
+#define DRV_VERSION IWLWIFI_VERSION VD "s"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -689,10 +689,6 @@ drop:
return -1;
}
-#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
-
-#include "iwl-spectrum.h"
-
#define BEACON_TIME_MASK_LOW 0x00FFFFFF
#define BEACON_TIME_MASK_HIGH 0xFF000000
#define TIME_UNIT 1024
@@ -819,7 +815,6 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
return rc;
}
-#endif
static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
@@ -962,6 +957,8 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
+ priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
+ iwl_rx_spectrum_measure_notif;
priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
iwl_rx_pm_debug_statistics_notif;
@@ -975,7 +972,6 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_hw_rx_statistics;
priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
- iwl_setup_spectrum_handlers(priv);
iwl_setup_rx_scan_handlers(priv);
priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
@@ -1644,7 +1640,7 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
if (!iwl3945_hw_valid_rtc_data_addr(base)) {
IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
- return pos;
+ return -EINVAL;
}
/* event log header */
@@ -1693,7 +1689,7 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
bufsz = size * 48;
*buf = kmalloc(bufsz, GFP_KERNEL);
if (!*buf)
- return pos;
+ return -ENOMEM;
}
if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
/* if uCode has wrapped back to top of log,
@@ -3037,18 +3033,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
mutex_unlock(&priv->mutex);
}
-static void iwl3945_bg_up(struct work_struct *data)
-{
- struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
- __iwl3945_up(priv);
- mutex_unlock(&priv->mutex);
-}
-
static void iwl3945_bg_restart(struct work_struct *data)
{
struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
@@ -3065,7 +3049,13 @@ static void iwl3945_bg_restart(struct work_struct *data)
ieee80211_restart_hw(priv->hw);
} else {
iwl3945_down(priv);
- queue_work(priv->workqueue, &priv->up);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ mutex_lock(&priv->mutex);
+ __iwl3945_up(priv);
+ mutex_unlock(&priv->mutex);
}
}
@@ -3569,8 +3559,6 @@ static ssize_t store_filter_flags(struct device *d,
static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
store_filter_flags);
-#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
-
static ssize_t show_measurement(struct device *d,
struct device_attribute *attr, char *buf)
{
@@ -3640,7 +3628,6 @@ static ssize_t store_measurement(struct device *d,
static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
show_measurement, store_measurement);
-#endif /* CONFIG_IWL3945_SPECTRUM_MEASUREMENT */
static ssize_t store_retry_rate(struct device *d,
struct device_attribute *attr,
@@ -3789,7 +3776,6 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
init_waitqueue_head(&priv->wait_command_queue);
- INIT_WORK(&priv->up, iwl3945_bg_up);
INIT_WORK(&priv->restart, iwl3945_bg_restart);
INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
@@ -3823,9 +3809,7 @@ static struct attribute *iwl3945_sysfs_entries[] = {
&dev_attr_dump_errors.attr,
&dev_attr_flags.attr,
&dev_attr_filter_flags.attr,
-#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
&dev_attr_measurement.attr,
-#endif
&dev_attr_retry_rate.attr,
&dev_attr_statistics.attr,
&dev_attr_status.attr,
@@ -3881,6 +3865,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
priv->band = IEEE80211_BAND_2GHZ;
priv->iw_mode = NL80211_IFTYPE_STATION;
+ priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
iwl_reset_qos(priv);
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 777584d76a88..1e41ad0fcad5 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -973,6 +973,10 @@ int iwm_send_pmkid_update(struct iwm_priv *iwm,
memset(&update, 0, sizeof(struct iwm_umac_pmkid_update));
+ update.hdr.oid = UMAC_WIFI_IF_CMD_PMKID_UPDATE;
+ update.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_pmkid_update) -
+ sizeof(struct iwm_umac_wifi_if));
+
update.command = cpu_to_le32(command);
if (pmksa->bssid)
memcpy(&update.bssid, pmksa->bssid, ETH_ALEN);
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
index 06af0552cd75..3dfd9f0e9003 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -463,6 +463,7 @@ struct iwm_umac_cmd_stop_resume_tx {
#define IWM_CMD_PMKID_FLUSH 3
struct iwm_umac_pmkid_update {
+ struct iwm_umac_wifi_if hdr;
__le32 command;
u8 bssid[ETH_ALEN];
__le16 reserved;
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 842811142bef..79ffa3b98d73 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -268,7 +268,7 @@ struct iwm_priv {
struct sk_buff_head rx_list;
struct list_head rx_tickets;
- struct list_head rx_packets[IWM_RX_ID_HASH + 1];
+ struct list_head rx_packets[IWM_RX_ID_HASH];
struct workqueue_struct *rx_wq;
struct work_struct rx_worker;
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index 5e650f358415..f03d5e4e59c3 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -1160,11 +1160,11 @@ int lbs_adhoc_stop(struct lbs_private *priv)
static inline int match_bss_no_security(struct lbs_802_11_security *secinfo,
struct bss_descriptor *match_bss)
{
- if (!secinfo->wep_enabled && !secinfo->WPAenabled
- && !secinfo->WPA2enabled
- && match_bss->wpa_ie[0] != WLAN_EID_GENERIC
- && match_bss->rsn_ie[0] != WLAN_EID_RSN
- && !(match_bss->capability & WLAN_CAPABILITY_PRIVACY))
+ if (!secinfo->wep_enabled &&
+ !secinfo->WPAenabled && !secinfo->WPA2enabled &&
+ match_bss->wpa_ie[0] != WLAN_EID_GENERIC &&
+ match_bss->rsn_ie[0] != WLAN_EID_RSN &&
+ !(match_bss->capability & WLAN_CAPABILITY_PRIVACY))
return 1;
else
return 0;
@@ -1173,9 +1173,9 @@ static inline int match_bss_no_security(struct lbs_802_11_security *secinfo,
static inline int match_bss_static_wep(struct lbs_802_11_security *secinfo,
struct bss_descriptor *match_bss)
{
- if (secinfo->wep_enabled && !secinfo->WPAenabled
- && !secinfo->WPA2enabled
- && (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
+ if (secinfo->wep_enabled &&
+ !secinfo->WPAenabled && !secinfo->WPA2enabled &&
+ (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
return 1;
else
return 0;
@@ -1184,8 +1184,8 @@ static inline int match_bss_static_wep(struct lbs_802_11_security *secinfo,
static inline int match_bss_wpa(struct lbs_802_11_security *secinfo,
struct bss_descriptor *match_bss)
{
- if (!secinfo->wep_enabled && secinfo->WPAenabled
- && (match_bss->wpa_ie[0] == WLAN_EID_GENERIC)
+ if (!secinfo->wep_enabled && secinfo->WPAenabled &&
+ (match_bss->wpa_ie[0] == WLAN_EID_GENERIC)
/* privacy bit may NOT be set in some APs like LinkSys WRT54G
&& (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */
)
@@ -1210,11 +1210,11 @@ static inline int match_bss_wpa2(struct lbs_802_11_security *secinfo,
static inline int match_bss_dynamic_wep(struct lbs_802_11_security *secinfo,
struct bss_descriptor *match_bss)
{
- if (!secinfo->wep_enabled && !secinfo->WPAenabled
- && !secinfo->WPA2enabled
- && (match_bss->wpa_ie[0] != WLAN_EID_GENERIC)
- && (match_bss->rsn_ie[0] != WLAN_EID_RSN)
- && (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
+ if (!secinfo->wep_enabled &&
+ !secinfo->WPAenabled && !secinfo->WPA2enabled &&
+ (match_bss->wpa_ie[0] != WLAN_EID_GENERIC) &&
+ (match_bss->rsn_ie[0] != WLAN_EID_RSN) &&
+ (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
return 1;
else
return 0;
@@ -1525,8 +1525,8 @@ static int assoc_helper_associate(struct lbs_private *priv,
/* If we're given an 'any' BSSID, try associating based on SSID */
if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
- if (compare_ether_addr(bssid_any, assoc_req->bssid)
- && compare_ether_addr(bssid_off, assoc_req->bssid)) {
+ if (compare_ether_addr(bssid_any, assoc_req->bssid) &&
+ compare_ether_addr(bssid_off, assoc_req->bssid)) {
ret = assoc_helper_bssid(priv, assoc_req);
done = 1;
}
@@ -1612,11 +1612,9 @@ static int assoc_helper_channel(struct lbs_private *priv,
goto restore_mesh;
}
- if ( assoc_req->secinfo.wep_enabled
- && (assoc_req->wep_keys[0].len
- || assoc_req->wep_keys[1].len
- || assoc_req->wep_keys[2].len
- || assoc_req->wep_keys[3].len)) {
+ if (assoc_req->secinfo.wep_enabled &&
+ (assoc_req->wep_keys[0].len || assoc_req->wep_keys[1].len ||
+ assoc_req->wep_keys[2].len || assoc_req->wep_keys[3].len)) {
/* Make sure WEP keys are re-sent to firmware */
set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags);
}
@@ -1983,14 +1981,14 @@ void lbs_association_worker(struct work_struct *work)
assoc_req->secinfo.auth_mode);
/* If 'any' SSID was specified, find an SSID to associate with */
- if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)
- && !assoc_req->ssid_len)
+ if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags) &&
+ !assoc_req->ssid_len)
find_any_ssid = 1;
/* But don't use 'any' SSID if there's a valid locked BSSID to use */
if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
- if (compare_ether_addr(assoc_req->bssid, bssid_any)
- && compare_ether_addr(assoc_req->bssid, bssid_off))
+ if (compare_ether_addr(assoc_req->bssid, bssid_any) &&
+ compare_ether_addr(assoc_req->bssid, bssid_off))
find_any_ssid = 0;
}
@@ -2052,13 +2050,6 @@ void lbs_association_worker(struct work_struct *work)
goto out;
}
- if ( test_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags)
- || test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags)) {
- ret = assoc_helper_wep_keys(priv, assoc_req);
- if (ret)
- goto out;
- }
-
if (test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) {
ret = assoc_helper_secinfo(priv, assoc_req);
if (ret)
@@ -2071,18 +2062,31 @@ void lbs_association_worker(struct work_struct *work)
goto out;
}
- if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)
- || test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) {
+ /*
+ * v10 FW wants WPA keys to be set/cleared before WEP key operations,
+ * otherwise it will fail to correctly associate to WEP networks.
+ * Other firmware versions don't appear to care.
+ */
+ if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags) ||
+ test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) {
ret = assoc_helper_wpa_keys(priv, assoc_req);
if (ret)
goto out;
}
+ if (test_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags) ||
+ test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags)) {
+ ret = assoc_helper_wep_keys(priv, assoc_req);
+ if (ret)
+ goto out;
+ }
+
+
/* SSID/BSSID should be the _last_ config option set, because they
* trigger the association attempt.
*/
- if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)
- || test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
+ if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags) ||
+ test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
int success = 1;
ret = assoc_helper_associate(priv, assoc_req);
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 954cd00f7452..e385af1f4583 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -1,6 +1,7 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
+#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/kthread.h>
#include <linux/kfifo.h>
@@ -364,8 +365,7 @@ int lbs_add_mesh(struct lbs_private *priv)
mesh_dev->netdev_ops = &mesh_netdev_ops;
mesh_dev->ethtool_ops = &lbs_ethtool_ops;
- memcpy(mesh_dev->dev_addr, priv->dev->dev_addr,
- sizeof(priv->dev->dev_addr));
+ memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, ETH_ALEN);
SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
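
The one-line mesh.c fix above replaces sizeof(priv->dev->dev_addr) with ETH_ALEN. Assuming dev_addr is a pointer in this kernel series rather than a 6-byte array embedded in struct net_device, sizeof() yields the pointer size, not the MAC address length, so the old copy length was wrong. A standalone sketch of the pitfall (illustration only; the struct below is a hypothetical stand-in, not the real net_device):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Hypothetical, simplified stand-in for struct net_device: what matters
 * here is only that dev_addr is a pointer, not a 6-byte array. */
struct fake_net_device {
	unsigned char *dev_addr;
};

int main(void)
{
	unsigned char mac[ETH_ALEN]  = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char copy[ETH_ALEN] = { 0 };
	struct fake_net_device dev = { .dev_addr = mac };

	/* sizeof(dev.dev_addr) is the size of a pointer (4 or 8), which is
	 * why the old memcpy length was wrong, at least on 32-bit builds. */
	printf("sizeof(dev.dev_addr) = %zu, ETH_ALEN = %d\n",
	       sizeof(dev.dev_addr), ETH_ALEN);

	/* The fix: copy exactly ETH_ALEN bytes, as the hunk above now does. */
	memcpy(copy, dev.dev_addr, ETH_ALEN);
	printf("copied %02x:%02x:%02x:%02x:%02x:%02x\n",
	       copy[0], copy[1], copy[2], copy[3], copy[4], copy[5]);
	return 0;
}
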
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 84df3fcf37b3..0dbda8dfbd99 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -281,6 +281,8 @@ struct mac80211_hwsim_data {
struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
+ struct mac_address addresses[2];
+
struct ieee80211_channel *channel;
unsigned long beacon_int; /* in jiffies unit */
unsigned int rx_filter;
@@ -1154,7 +1156,11 @@ static int __init init_mac80211_hwsim(void)
SET_IEEE80211_DEV(hw, data->dev);
addr[3] = i >> 8;
addr[4] = i;
- SET_IEEE80211_PERM_ADDR(hw, addr);
+ memcpy(data->addresses[0].addr, addr, ETH_ALEN);
+ memcpy(data->addresses[1].addr, addr, ETH_ALEN);
+ data->addresses[1].addr[0] |= 0x40;
+ hw->wiphy->n_addresses = 2;
+ hw->wiphy->addresses = data->addresses;
hw->channel_change_time = 1;
hw->queues = 4;
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index c1c6ecd0c5b3..f0f08f3919cc 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -2,7 +2,7 @@
* drivers/net/wireless/mwl8k.c
* Driver for Marvell TOPDOG 802.11 Wireless cards
*
- * Copyright (C) 2008-2009 Marvell Semiconductor Inc.
+ * Copyright (C) 2008, 2009, 2010 Marvell Semiconductor Inc.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -26,7 +26,7 @@
#define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver"
#define MWL8K_NAME KBUILD_MODNAME
-#define MWL8K_VERSION "0.10"
+#define MWL8K_VERSION "0.12"
/* Register definitions */
#define MWL8K_HIU_GEN_PTR 0x00000c10
@@ -141,6 +141,14 @@ struct mwl8k_priv {
/* hardware/firmware parameters */
bool ap_fw;
struct rxd_ops *rxd_ops;
+ struct ieee80211_supported_band band_24;
+ struct ieee80211_channel channels_24[14];
+ struct ieee80211_rate rates_24[14];
+ struct ieee80211_supported_band band_50;
+ struct ieee80211_channel channels_50[4];
+ struct ieee80211_rate rates_50[9];
+ u32 ap_macids_supported;
+ u32 sta_macids_supported;
/* firmware access */
struct mutex fw_mutex;
@@ -154,9 +162,9 @@ struct mwl8k_priv {
/* TX quiesce completion, protected by fw_mutex and tx_lock */
struct completion *tx_wait;
- struct ieee80211_vif *vif;
-
- struct ieee80211_channel *current_channel;
+ /* List of interfaces. */
+ u32 macids_used;
+ struct list_head vif_list;
/* power management status cookie from firmware */
u32 *cookie;
@@ -175,16 +183,15 @@ struct mwl8k_priv {
struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES];
struct mwl8k_tx_queue txq[MWL8K_TX_QUEUES];
- /* PHY parameters */
- struct ieee80211_supported_band band;
- struct ieee80211_channel channels[14];
- struct ieee80211_rate rates[14];
-
bool radio_on;
bool radio_short_preamble;
bool sniffer_enabled;
bool wmm_enabled;
+ struct work_struct sta_notify_worker;
+ spinlock_t sta_notify_list_lock;
+ struct list_head sta_notify_list;
+
/* XXX need to convert this to handle multiple interfaces */
bool capture_beacon;
u8 capture_bssid[ETH_ALEN];
@@ -198,28 +205,33 @@ struct mwl8k_priv {
*/
struct work_struct finalize_join_worker;
- /* Tasklet to reclaim TX descriptors and buffers after tx */
- struct tasklet_struct tx_reclaim_task;
+ /* Tasklet to perform TX reclaim. */
+ struct tasklet_struct poll_tx_task;
+
+ /* Tasklet to perform RX. */
+ struct tasklet_struct poll_rx_task;
};
/* Per interface specific private data */
struct mwl8k_vif {
- /* Local MAC address. */
- u8 mac_addr[ETH_ALEN];
-
- /* BSSID of AP. */
- u8 bssid[ETH_ALEN];
+ struct list_head list;
+ struct ieee80211_vif *vif;
- /* Index into station database. Returned by UPDATE_STADB. */
- u8 peer_id;
+ /* Firmware macid for this vif. */
+ int macid;
- /* Non AMPDU sequence number assigned by driver */
- u16 seqno;
+ /* Non AMPDU sequence number assigned by driver. */
+ u16 seqno;
};
-
#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))
-static const struct ieee80211_channel mwl8k_channels[] = {
+struct mwl8k_sta {
+ /* Index into station database. Returned by UPDATE_STADB. */
+ u8 peer_id;
+};
+#define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
+
+static const struct ieee80211_channel mwl8k_channels_24[] = {
{ .center_freq = 2412, .hw_value = 1, },
{ .center_freq = 2417, .hw_value = 2, },
{ .center_freq = 2422, .hw_value = 3, },
@@ -236,7 +248,7 @@ static const struct ieee80211_channel mwl8k_channels[] = {
{ .center_freq = 2484, .hw_value = 14, },
};
-static const struct ieee80211_rate mwl8k_rates[] = {
+static const struct ieee80211_rate mwl8k_rates_24[] = {
{ .bitrate = 10, .hw_value = 2, },
{ .bitrate = 20, .hw_value = 4, },
{ .bitrate = 55, .hw_value = 11, },
@@ -253,8 +265,23 @@ static const struct ieee80211_rate mwl8k_rates[] = {
{ .bitrate = 720, .hw_value = 144, },
};
-static const u8 mwl8k_rateids[12] = {
- 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108,
+static const struct ieee80211_channel mwl8k_channels_50[] = {
+ { .center_freq = 5180, .hw_value = 36, },
+ { .center_freq = 5200, .hw_value = 40, },
+ { .center_freq = 5220, .hw_value = 44, },
+ { .center_freq = 5240, .hw_value = 48, },
+};
+
+static const struct ieee80211_rate mwl8k_rates_50[] = {
+ { .bitrate = 60, .hw_value = 12, },
+ { .bitrate = 90, .hw_value = 18, },
+ { .bitrate = 120, .hw_value = 24, },
+ { .bitrate = 180, .hw_value = 36, },
+ { .bitrate = 240, .hw_value = 48, },
+ { .bitrate = 360, .hw_value = 72, },
+ { .bitrate = 480, .hw_value = 96, },
+ { .bitrate = 540, .hw_value = 108, },
+ { .bitrate = 720, .hw_value = 144, },
};
/* Set or get info from Firmware */
@@ -270,6 +297,7 @@ static const u8 mwl8k_rateids[12] = {
#define MWL8K_CMD_RADIO_CONTROL 0x001c
#define MWL8K_CMD_RF_TX_POWER 0x001e
#define MWL8K_CMD_RF_ANTENNA 0x0020
+#define MWL8K_CMD_SET_BEACON 0x0100 /* per-vif */
#define MWL8K_CMD_SET_PRE_SCAN 0x0107
#define MWL8K_CMD_SET_POST_SCAN 0x0108
#define MWL8K_CMD_SET_RF_CHANNEL 0x010a
@@ -283,8 +311,10 @@ static const u8 mwl8k_rateids[12] = {
#define MWL8K_CMD_MIMO_CONFIG 0x0125
#define MWL8K_CMD_USE_FIXED_RATE 0x0126
#define MWL8K_CMD_ENABLE_SNIFFER 0x0150
-#define MWL8K_CMD_SET_MAC_ADDR 0x0202
+#define MWL8K_CMD_SET_MAC_ADDR 0x0202 /* per-vif */
#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
+#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */
+#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */
#define MWL8K_CMD_UPDATE_STADB 0x1123
static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
@@ -302,6 +332,7 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
MWL8K_CMDNAME(RADIO_CONTROL);
MWL8K_CMDNAME(RF_TX_POWER);
MWL8K_CMDNAME(RF_ANTENNA);
+ MWL8K_CMDNAME(SET_BEACON);
MWL8K_CMDNAME(SET_PRE_SCAN);
MWL8K_CMDNAME(SET_POST_SCAN);
MWL8K_CMDNAME(SET_RF_CHANNEL);
@@ -317,6 +348,8 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
MWL8K_CMDNAME(ENABLE_SNIFFER);
MWL8K_CMDNAME(SET_MAC_ADDR);
MWL8K_CMDNAME(SET_RATEADAPT_MODE);
+ MWL8K_CMDNAME(BSS_START);
+ MWL8K_CMDNAME(SET_NEW_STN);
MWL8K_CMDNAME(UPDATE_STADB);
default:
snprintf(buf, bufsize, "0x%x", cmd);
@@ -389,13 +422,11 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv)
return 0;
}
-MODULE_FIRMWARE("mwl8k/helper_8687.fw");
-MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
-
struct mwl8k_cmd_pkt {
__le16 code;
__le16 length;
- __le16 seq_num;
+ __u8 seq_num;
+ __u8 macid;
__le16 result;
char payload[0];
} __attribute__((packed));
@@ -453,6 +484,7 @@ static int mwl8k_load_fw_image(struct mwl8k_priv *priv,
cmd->code = cpu_to_le16(MWL8K_CMD_CODE_DNLD);
cmd->seq_num = 0;
+ cmd->macid = 0;
cmd->result = 0;
done = 0;
@@ -598,54 +630,6 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
}
-/*
- * Defines shared between transmission and reception.
- */
-/* HT control fields for firmware */
-struct ewc_ht_info {
- __le16 control1;
- __le16 control2;
- __le16 control3;
-} __attribute__((packed));
-
-/* Firmware Station database operations */
-#define MWL8K_STA_DB_ADD_ENTRY 0
-#define MWL8K_STA_DB_MODIFY_ENTRY 1
-#define MWL8K_STA_DB_DEL_ENTRY 2
-#define MWL8K_STA_DB_FLUSH 3
-
-/* Peer Entry flags - used to define the type of the peer node */
-#define MWL8K_PEER_TYPE_ACCESSPOINT 2
-
-struct peer_capability_info {
- /* Peer type - AP vs. STA. */
- __u8 peer_type;
-
- /* Basic 802.11 capabilities from assoc resp. */
- __le16 basic_caps;
-
- /* Set if peer supports 802.11n high throughput (HT). */
- __u8 ht_support;
-
- /* Valid if HT is supported. */
- __le16 ht_caps;
- __u8 extended_ht_caps;
- struct ewc_ht_info ewc_info;
-
- /* Legacy rate table. Intersection of our rates and peer rates. */
- __u8 legacy_rates[12];
-
- /* HT rate table. Intersection of our rates and peer rates. */
- __u8 ht_rates[16];
- __u8 pad[16];
-
- /* If set, interoperability mode, no proprietary extensions. */
- __u8 interop;
- __u8 pad2;
- __u8 station_id;
- __le16 amsdu_enabled;
-} __attribute__((packed));
-
/* DMA header used by firmware and hardware. */
struct mwl8k_dma_data {
__le16 fwlen;
@@ -779,15 +763,21 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
} else {
int i;
- for (i = 0; i < ARRAY_SIZE(mwl8k_rates); i++) {
- if (mwl8k_rates[i].hw_value == rxd->rate) {
+ for (i = 0; i < ARRAY_SIZE(mwl8k_rates_24); i++) {
+ if (mwl8k_rates_24[i].hw_value == rxd->rate) {
status->rate_idx = i;
break;
}
}
}
- status->band = IEEE80211_BAND_2GHZ;
+ if (rxd->channel > 14) {
+ status->band = IEEE80211_BAND_5GHZ;
+ if (!(status->flag & RX_FLAG_HT))
+ status->rate_idx -= 5;
+ } else {
+ status->band = IEEE80211_BAND_2GHZ;
+ }
status->freq = ieee80211_channel_to_frequency(rxd->channel);
*qos = rxd->qos_control;
@@ -878,7 +868,13 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
if (rate_info & MWL8K_STA_RATE_INFO_MCS_FORMAT)
status->flag |= RX_FLAG_HT;
- status->band = IEEE80211_BAND_2GHZ;
+ if (rxd->channel > 14) {
+ status->band = IEEE80211_BAND_5GHZ;
+ if (!(status->flag & RX_FLAG_HT))
+ status->rate_idx -= 5;
+ } else {
+ status->band = IEEE80211_BAND_2GHZ;
+ }
status->freq = ieee80211_channel_to_frequency(rxd->channel);
*qos = rxd->qos_control;
@@ -1225,7 +1221,7 @@ static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw)
/*
* Must be called with priv->fw_mutex held and tx queues stopped.
*/
-#define MWL8K_TX_WAIT_TIMEOUT_MS 1000
+#define MWL8K_TX_WAIT_TIMEOUT_MS 5000
static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
{
@@ -1269,8 +1265,8 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
}
if (priv->pending_tx_pkts < oldcount) {
- printk(KERN_NOTICE "%s: timeout waiting for tx "
- "rings to drain (%d -> %d pkts), retrying\n",
+ printk(KERN_NOTICE "%s: waiting for tx rings "
+ "to drain (%d -> %d pkts)\n",
wiphy_name(hw->wiphy), oldcount,
priv->pending_tx_pkts);
retry = 1;
@@ -1295,13 +1291,15 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
MWL8K_TXD_STATUS_OK_RETRY | \
MWL8K_TXD_STATUS_OK_MORE_RETRY))
-static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
+static int
+mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_tx_queue *txq = priv->txq + index;
- int wake = 0;
+ int processed;
- while (txq->stats.len > 0) {
+ processed = 0;
+ while (txq->stats.len > 0 && limit--) {
int tx;
struct mwl8k_tx_desc *tx_desc;
unsigned long addr;
@@ -1348,11 +1346,13 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
ieee80211_tx_status_irqsafe(hw, skb);
- wake = 1;
+ processed++;
}
- if (wake && priv->radio_on && !mutex_is_locked(&priv->fw_mutex))
+ if (processed && priv->radio_on && !mutex_is_locked(&priv->fw_mutex))
ieee80211_wake_queue(hw, index);
+
+ return processed;
}
/* must be called only when the card's transmit is completely halted */
@@ -1361,7 +1361,7 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_tx_queue *txq = priv->txq + index;
- mwl8k_txq_reclaim(hw, index, 1);
+ mwl8k_txq_reclaim(hw, index, INT_MAX, 1);
kfree(txq->skb);
txq->skb = NULL;
@@ -1399,11 +1399,9 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
mwl8k_vif = MWL8K_VIF(tx_info->control.vif);
if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
- u16 seqno = mwl8k_vif->seqno;
-
wh->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
- wh->seq_ctrl |= cpu_to_le16(seqno << 4);
- mwl8k_vif->seqno = seqno++ % 4096;
+ wh->seq_ctrl |= cpu_to_le16(mwl8k_vif->seqno);
+ mwl8k_vif->seqno += 0x10;
}
/* Setup firmware control bit fields for each frame type. */
@@ -1449,7 +1447,10 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
tx->pkt_phys_addr = cpu_to_le32(dma);
tx->pkt_len = cpu_to_le16(skb->len);
tx->rate_info = 0;
- tx->peer_id = mwl8k_vif->peer_id;
+ if (!priv->ap_fw && tx_info->control.sta != NULL)
+ tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id;
+ else
+ tx->peer_id = 0;
wmb();
tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus);
@@ -1602,6 +1603,56 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
return rc;
}
+static int mwl8k_post_pervif_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct mwl8k_cmd_pkt *cmd)
+{
+ if (vif != NULL)
+ cmd->macid = MWL8K_VIF(vif)->macid;
+ return mwl8k_post_cmd(hw, cmd);
+}
+
+/*
+ * Setup code shared between STA and AP firmware images.
+ */
+static void mwl8k_setup_2ghz_band(struct ieee80211_hw *hw)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ BUILD_BUG_ON(sizeof(priv->channels_24) != sizeof(mwl8k_channels_24));
+ memcpy(priv->channels_24, mwl8k_channels_24, sizeof(mwl8k_channels_24));
+
+ BUILD_BUG_ON(sizeof(priv->rates_24) != sizeof(mwl8k_rates_24));
+ memcpy(priv->rates_24, mwl8k_rates_24, sizeof(mwl8k_rates_24));
+
+ priv->band_24.band = IEEE80211_BAND_2GHZ;
+ priv->band_24.channels = priv->channels_24;
+ priv->band_24.n_channels = ARRAY_SIZE(mwl8k_channels_24);
+ priv->band_24.bitrates = priv->rates_24;
+ priv->band_24.n_bitrates = ARRAY_SIZE(mwl8k_rates_24);
+
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band_24;
+}
+
+static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ BUILD_BUG_ON(sizeof(priv->channels_50) != sizeof(mwl8k_channels_50));
+ memcpy(priv->channels_50, mwl8k_channels_50, sizeof(mwl8k_channels_50));
+
+ BUILD_BUG_ON(sizeof(priv->rates_50) != sizeof(mwl8k_rates_50));
+ memcpy(priv->rates_50, mwl8k_rates_50, sizeof(mwl8k_rates_50));
+
+ priv->band_50.band = IEEE80211_BAND_5GHZ;
+ priv->band_50.channels = priv->channels_50;
+ priv->band_50.n_channels = ARRAY_SIZE(mwl8k_channels_50);
+ priv->band_50.bitrates = priv->rates_50;
+ priv->band_50.n_bitrates = ARRAY_SIZE(mwl8k_rates_50);
+
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->band_50;
+}
+
/*
* CMD_GET_HW_SPEC (STA version).
*/
@@ -1624,6 +1675,89 @@ struct mwl8k_cmd_get_hw_spec_sta {
__le32 total_rxd;
} __attribute__((packed));
+#define MWL8K_CAP_MAX_AMSDU 0x20000000
+#define MWL8K_CAP_GREENFIELD 0x08000000
+#define MWL8K_CAP_AMPDU 0x04000000
+#define MWL8K_CAP_RX_STBC 0x01000000
+#define MWL8K_CAP_TX_STBC 0x00800000
+#define MWL8K_CAP_SHORTGI_40MHZ 0x00400000
+#define MWL8K_CAP_SHORTGI_20MHZ 0x00200000
+#define MWL8K_CAP_RX_ANTENNA_MASK 0x000e0000
+#define MWL8K_CAP_TX_ANTENNA_MASK 0x0001c000
+#define MWL8K_CAP_DELAY_BA 0x00003000
+#define MWL8K_CAP_MIMO 0x00000200
+#define MWL8K_CAP_40MHZ 0x00000100
+#define MWL8K_CAP_BAND_MASK 0x00000007
+#define MWL8K_CAP_5GHZ 0x00000004
+#define MWL8K_CAP_2GHZ4 0x00000001
+
+static void
+mwl8k_set_ht_caps(struct ieee80211_hw *hw,
+ struct ieee80211_supported_band *band, u32 cap)
+{
+ int rx_streams;
+ int tx_streams;
+
+ band->ht_cap.ht_supported = 1;
+
+ if (cap & MWL8K_CAP_MAX_AMSDU)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+ if (cap & MWL8K_CAP_GREENFIELD)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_GRN_FLD;
+ if (cap & MWL8K_CAP_AMPDU) {
+ hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+ band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
+ }
+ if (cap & MWL8K_CAP_RX_STBC)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_RX_STBC;
+ if (cap & MWL8K_CAP_TX_STBC)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
+ if (cap & MWL8K_CAP_SHORTGI_40MHZ)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+ if (cap & MWL8K_CAP_SHORTGI_20MHZ)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+ if (cap & MWL8K_CAP_DELAY_BA)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_DELAY_BA;
+ if (cap & MWL8K_CAP_40MHZ)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+ rx_streams = hweight32(cap & MWL8K_CAP_RX_ANTENNA_MASK);
+ tx_streams = hweight32(cap & MWL8K_CAP_TX_ANTENNA_MASK);
+
+ band->ht_cap.mcs.rx_mask[0] = 0xff;
+ if (rx_streams >= 2)
+ band->ht_cap.mcs.rx_mask[1] = 0xff;
+ if (rx_streams >= 3)
+ band->ht_cap.mcs.rx_mask[2] = 0xff;
+ band->ht_cap.mcs.rx_mask[4] = 0x01;
+ band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+
+ if (rx_streams != tx_streams) {
+ band->ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ band->ht_cap.mcs.tx_params |= (tx_streams - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+ }
+}
+
+static void
+mwl8k_set_caps(struct ieee80211_hw *hw, u32 caps)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ if ((caps & MWL8K_CAP_2GHZ4) || !(caps & MWL8K_CAP_BAND_MASK)) {
+ mwl8k_setup_2ghz_band(hw);
+ if (caps & MWL8K_CAP_MIMO)
+ mwl8k_set_ht_caps(hw, &priv->band_24, caps);
+ }
+
+ if (caps & MWL8K_CAP_5GHZ) {
+ mwl8k_setup_5ghz_band(hw);
+ if (caps & MWL8K_CAP_MIMO)
+ mwl8k_set_ht_caps(hw, &priv->band_50, caps);
+ }
+}
+
static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
@@ -1654,6 +1788,9 @@ static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
priv->fw_rev = le32_to_cpu(cmd->fw_rev);
priv->hw_rev = cmd->hw_rev;
+ mwl8k_set_caps(hw, le32_to_cpu(cmd->caps));
+ priv->ap_macids_supported = 0x00000000;
+ priv->sta_macids_supported = 0x00000001;
}
kfree(cmd);
@@ -1707,6 +1844,9 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
priv->fw_rev = le32_to_cpu(cmd->fw_rev);
priv->hw_rev = cmd->hw_rev;
+ mwl8k_setup_2ghz_band(hw);
+ priv->ap_macids_supported = 0x000000ff;
+ priv->sta_macids_supported = 0x00000000;
off = le32_to_cpu(cmd->wcbbase0) & 0xffff;
iowrite32(cpu_to_le32(priv->txq[0].txd_dma), priv->sram + off);
@@ -1752,7 +1892,9 @@ struct mwl8k_cmd_set_hw_spec {
__le32 total_rxd;
} __attribute__((packed));
-#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
+#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
+#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020
+#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010
static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
{
@@ -1773,7 +1915,9 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
for (i = 0; i < MWL8K_TX_QUEUES; i++)
cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
- cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT);
+ cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
+ MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
+ MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON);
cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
@@ -2011,6 +2155,36 @@ mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask)
}
/*
+ * CMD_SET_BEACON.
+ */
+struct mwl8k_cmd_set_beacon {
+ struct mwl8k_cmd_pkt header;
+ __le16 beacon_len;
+ __u8 beacon[0];
+};
+
+static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *beacon, int len)
+{
+ struct mwl8k_cmd_set_beacon *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd) + len, GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_BEACON);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd) + len);
+ cmd->beacon_len = cpu_to_le16(len);
+ memcpy(cmd->beacon, beacon, len);
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
* CMD_SET_PRE_SCAN.
*/
struct mwl8k_cmd_set_pre_scan {
@@ -2045,7 +2219,7 @@ struct mwl8k_cmd_set_post_scan {
} __attribute__((packed));
static int
-mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, __u8 *mac)
+mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac)
{
struct mwl8k_cmd_set_post_scan *cmd;
int rc;
@@ -2076,8 +2250,9 @@ struct mwl8k_cmd_set_rf_channel {
} __attribute__((packed));
static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
- struct ieee80211_channel *channel)
+ struct ieee80211_conf *conf)
{
+ struct ieee80211_channel *channel = conf->channel;
struct mwl8k_cmd_set_rf_channel *cmd;
int rc;
@@ -2089,10 +2264,19 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(MWL8K_CMD_SET);
cmd->current_channel = channel->hw_value;
+
if (channel->band == IEEE80211_BAND_2GHZ)
- cmd->channel_flags = cpu_to_le32(0x00000081);
- else
- cmd->channel_flags = cpu_to_le32(0x00000000);
+ cmd->channel_flags |= cpu_to_le32(0x00000001);
+ else if (channel->band == IEEE80211_BAND_5GHZ)
+ cmd->channel_flags |= cpu_to_le32(0x00000004);
+
+ if (conf->channel_type == NL80211_CHAN_NO_HT ||
+ conf->channel_type == NL80211_CHAN_HT20)
+ cmd->channel_flags |= cpu_to_le32(0x00000080);
+ else if (conf->channel_type == NL80211_CHAN_HT40MINUS)
+ cmd->channel_flags |= cpu_to_le32(0x000001900);
+ else if (conf->channel_type == NL80211_CHAN_HT40PLUS)
+ cmd->channel_flags |= cpu_to_le32(0x000000900);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2118,10 +2302,26 @@ struct mwl8k_cmd_update_set_aid {
__u8 supp_rates[14];
} __attribute__((packed));
+static void legacy_rate_mask_to_array(u8 *rates, u32 mask)
+{
+ int i;
+ int j;
+
+ /*
+ * Clear nonstandard rates 4 and 13.
+ */
+ mask &= 0x1fef;
+
+ for (i = 0, j = 0; i < 14; i++) {
+ if (mask & (1 << i))
+ rates[j++] = mwl8k_rates_24[i].hw_value;
+ }
+}
+
static int
-mwl8k_cmd_set_aid(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u32 legacy_rate_mask)
{
- struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
struct mwl8k_cmd_update_set_aid *cmd;
u16 prot_mode;
int rc;
@@ -2133,8 +2333,7 @@ mwl8k_cmd_set_aid(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->aid = cpu_to_le16(vif->bss_conf.aid);
-
- memcpy(cmd->bssid, mv_vif->bssid, ETH_ALEN);
+ memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
if (vif->bss_conf.use_cts_prot) {
prot_mode = MWL8K_FRAME_PROT_11G;
@@ -2154,7 +2353,7 @@ mwl8k_cmd_set_aid(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
cmd->protection_mode = cpu_to_le16(prot_mode);
- memcpy(cmd->supp_rates, mwl8k_rateids, sizeof(mwl8k_rateids));
+ legacy_rate_mask_to_array(cmd->supp_rates, legacy_rate_mask);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2175,7 +2374,8 @@ struct mwl8k_cmd_set_rate {
} __attribute__((packed));
static int
-mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 legacy_rate_mask, u8 *mcs_rates)
{
struct mwl8k_cmd_set_rate *cmd;
int rc;
@@ -2186,7 +2386,8 @@ mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- memcpy(cmd->legacy_rates, mwl8k_rateids, sizeof(mwl8k_rateids));
+ legacy_rate_mask_to_array(cmd->legacy_rates, legacy_rate_mask);
+ memcpy(cmd->mcs_set, mcs_rates, 16);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2244,8 +2445,8 @@ struct mwl8k_cmd_set_rts_threshold {
__le16 threshold;
} __attribute__((packed));
-static int mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw,
- u16 action, u16 threshold)
+static int
+mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh)
{
struct mwl8k_cmd_set_rts_threshold *cmd;
int rc;
@@ -2256,8 +2457,8 @@ static int mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw,
cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le16(action);
- cmd->threshold = cpu_to_le16(threshold);
+ cmd->action = cpu_to_le16(MWL8K_CMD_SET);
+ cmd->threshold = cpu_to_le16(rts_thresh);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2357,12 +2558,6 @@ mwl8k_cmd_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
if (cmd == NULL)
return -ENOMEM;
- /*
- * Queues 0 (BE) and 1 (BK) are swapped in hardware for
- * this call.
- */
- qnum ^= !(qnum >> 1);
-
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL);
@@ -2448,49 +2643,70 @@ static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
}
/*
- * CMD_USE_FIXED_RATE.
+ * CMD_USE_FIXED_RATE (STA version).
*/
-#define MWL8K_RATE_TABLE_SIZE 8
-#define MWL8K_UCAST_RATE 0
-#define MWL8K_USE_AUTO_RATE 0x0002
+struct mwl8k_cmd_use_fixed_rate_sta {
+ struct mwl8k_cmd_pkt header;
+ __le32 action;
+ __le32 allow_rate_drop;
+ __le32 num_rates;
+ struct {
+ __le32 is_ht_rate;
+ __le32 enable_retry;
+ __le32 rate;
+ __le32 retry_count;
+ } rate_entry[8];
+ __le32 rate_type;
+ __le32 reserved1;
+ __le32 reserved2;
+} __attribute__((packed));
-struct mwl8k_rate_entry {
- /* Set to 1 if HT rate, 0 if legacy. */
- __le32 is_ht_rate;
+#define MWL8K_USE_AUTO_RATE 0x0002
+#define MWL8K_UCAST_RATE 0
- /* Set to 1 to use retry_count field. */
- __le32 enable_retry;
+static int mwl8k_cmd_use_fixed_rate_sta(struct ieee80211_hw *hw)
+{
+ struct mwl8k_cmd_use_fixed_rate_sta *cmd;
+ int rc;
- /* Specified legacy rate or MCS. */
- __le32 rate;
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
- /* Number of allowed retries. */
- __le32 retry_count;
-} __attribute__((packed));
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE);
+ cmd->rate_type = cpu_to_le32(MWL8K_UCAST_RATE);
-struct mwl8k_rate_table {
- /* 1 to allow specified rate and below */
- __le32 allow_rate_drop;
- __le32 num_rates;
- struct mwl8k_rate_entry rate_entry[MWL8K_RATE_TABLE_SIZE];
-} __attribute__((packed));
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+ kfree(cmd);
-struct mwl8k_cmd_use_fixed_rate {
- struct mwl8k_cmd_pkt header;
- __le32 action;
- struct mwl8k_rate_table rate_table;
+ return rc;
+}
- /* Unicast, Broadcast or Multicast */
- __le32 rate_type;
- __le32 reserved1;
- __le32 reserved2;
+/*
+ * CMD_USE_FIXED_RATE (AP version).
+ */
+struct mwl8k_cmd_use_fixed_rate_ap {
+ struct mwl8k_cmd_pkt header;
+ __le32 action;
+ __le32 allow_rate_drop;
+ __le32 num_rates;
+ struct mwl8k_rate_entry_ap {
+ __le32 is_ht_rate;
+ __le32 enable_retry;
+ __le32 rate;
+ __le32 retry_count;
+ } rate_entry[4];
+ u8 multicast_rate;
+ u8 multicast_rate_type;
+ u8 management_rate;
} __attribute__((packed));
-static int mwl8k_cmd_use_fixed_rate(struct ieee80211_hw *hw,
- u32 action, u32 rate_type, struct mwl8k_rate_table *rate_table)
+static int
+mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt)
{
- struct mwl8k_cmd_use_fixed_rate *cmd;
- int count;
+ struct mwl8k_cmd_use_fixed_rate_ap *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -2499,32 +2715,9 @@ static int mwl8k_cmd_use_fixed_rate(struct ieee80211_hw *hw,
cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
-
- cmd->action = cpu_to_le32(action);
- cmd->rate_type = cpu_to_le32(rate_type);
-
- if (rate_table != NULL) {
- /*
- * Copy over each field manually so that endian
- * conversion can be done.
- */
- cmd->rate_table.allow_rate_drop =
- cpu_to_le32(rate_table->allow_rate_drop);
- cmd->rate_table.num_rates =
- cpu_to_le32(rate_table->num_rates);
-
- for (count = 0; count < rate_table->num_rates; count++) {
- struct mwl8k_rate_entry *dst =
- &cmd->rate_table.rate_entry[count];
- struct mwl8k_rate_entry *src =
- &rate_table->rate_entry[count];
-
- dst->is_ht_rate = cpu_to_le32(src->is_ht_rate);
- dst->enable_retry = cpu_to_le32(src->enable_retry);
- dst->rate = cpu_to_le32(src->rate);
- dst->retry_count = cpu_to_le32(src->retry_count);
- }
- }
+ cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE);
+ cmd->multicast_rate = mcast;
+ cmd->management_rate = mgmt;
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2573,12 +2766,33 @@ struct mwl8k_cmd_set_mac_addr {
};
} __attribute__((packed));
-static int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw, u8 *mac)
+#define MWL8K_MAC_TYPE_PRIMARY_CLIENT 0
+#define MWL8K_MAC_TYPE_SECONDARY_CLIENT 1
+#define MWL8K_MAC_TYPE_PRIMARY_AP 2
+#define MWL8K_MAC_TYPE_SECONDARY_AP 3
+
+static int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *mac)
{
struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
struct mwl8k_cmd_set_mac_addr *cmd;
+ int mac_type;
int rc;
+ mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
+ if (vif != NULL && vif->type == NL80211_IFTYPE_STATION) {
+ if (mwl8k_vif->macid + 1 == ffs(priv->sta_macids_supported))
+ mac_type = MWL8K_MAC_TYPE_PRIMARY_CLIENT;
+ else
+ mac_type = MWL8K_MAC_TYPE_SECONDARY_CLIENT;
+ } else if (vif != NULL && vif->type == NL80211_IFTYPE_AP) {
+ if (mwl8k_vif->macid + 1 == ffs(priv->ap_macids_supported))
+ mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
+ else
+ mac_type = MWL8K_MAC_TYPE_SECONDARY_AP;
+ }
+
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
@@ -2586,13 +2800,13 @@ static int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw, u8 *mac)
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
if (priv->ap_fw) {
- cmd->mbss.mac_type = 0;
+ cmd->mbss.mac_type = cpu_to_le16(mac_type);
memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN);
} else {
memcpy(cmd->mac_addr, mac, ETH_ALEN);
}
- rc = mwl8k_post_cmd(hw, &cmd->header);
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
kfree(cmd);
return rc;
@@ -2628,8 +2842,180 @@ static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
}
/*
+ * CMD_BSS_START.
+ */
+struct mwl8k_cmd_bss_start {
+ struct mwl8k_cmd_pkt header;
+ __le32 enable;
+} __attribute__((packed));
+
+static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int enable)
+{
+ struct mwl8k_cmd_bss_start *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_BSS_START);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->enable = cpu_to_le32(enable);
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
+ * CMD_SET_NEW_STN.
+ */
+struct mwl8k_cmd_set_new_stn {
+ struct mwl8k_cmd_pkt header;
+ __le16 aid;
+ __u8 mac_addr[6];
+ __le16 stn_id;
+ __le16 action;
+ __le16 rsvd;
+ __le32 legacy_rates;
+ __u8 ht_rates[4];
+ __le16 cap_info;
+ __le16 ht_capabilities_info;
+ __u8 mac_ht_param_info;
+ __u8 rev;
+ __u8 control_channel;
+ __u8 add_channel;
+ __le16 op_mode;
+ __le16 stbc;
+ __u8 add_qos_info;
+ __u8 is_qos_sta;
+ __le32 fw_sta_ptr;
+} __attribute__((packed));
+
+#define MWL8K_STA_ACTION_ADD 0
+#define MWL8K_STA_ACTION_REMOVE 2
+
+static int mwl8k_cmd_set_new_stn_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mwl8k_cmd_set_new_stn *cmd;
+ u32 rates;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->aid = cpu_to_le16(sta->aid);
+ memcpy(cmd->mac_addr, sta->addr, ETH_ALEN);
+ cmd->stn_id = cpu_to_le16(sta->aid);
+ cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD);
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
+ rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
+ else
+ rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+ cmd->legacy_rates = cpu_to_le32(rates);
+ if (sta->ht_cap.ht_supported) {
+ cmd->ht_rates[0] = sta->ht_cap.mcs.rx_mask[0];
+ cmd->ht_rates[1] = sta->ht_cap.mcs.rx_mask[1];
+ cmd->ht_rates[2] = sta->ht_cap.mcs.rx_mask[2];
+ cmd->ht_rates[3] = sta->ht_cap.mcs.rx_mask[3];
+ cmd->ht_capabilities_info = cpu_to_le16(sta->ht_cap.cap);
+ cmd->mac_ht_param_info = (sta->ht_cap.ampdu_factor & 3) |
+ ((sta->ht_cap.ampdu_density & 7) << 2);
+ cmd->is_qos_sta = 1;
+ }
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+static int mwl8k_cmd_set_new_stn_add_self(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mwl8k_cmd_set_new_stn *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ memcpy(cmd->mac_addr, vif->addr, ETH_ALEN);
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *addr)
+{
+ struct mwl8k_cmd_set_new_stn *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ memcpy(cmd->mac_addr, addr, ETH_ALEN);
+ cmd->action = cpu_to_le16(MWL8K_STA_ACTION_REMOVE);
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
* CMD_UPDATE_STADB.
*/
+struct ewc_ht_info {
+ __le16 control1;
+ __le16 control2;
+ __le16 control3;
+} __attribute__((packed));
+
+struct peer_capability_info {
+ /* Peer type - AP vs. STA. */
+ __u8 peer_type;
+
+ /* Basic 802.11 capabilities from assoc resp. */
+ __le16 basic_caps;
+
+ /* Set if peer supports 802.11n high throughput (HT). */
+ __u8 ht_support;
+
+ /* Valid if HT is supported. */
+ __le16 ht_caps;
+ __u8 extended_ht_caps;
+ struct ewc_ht_info ewc_info;
+
+ /* Legacy rate table. Intersection of our rates and peer rates. */
+ __u8 legacy_rates[12];
+
+ /* HT rate table. Intersection of our rates and peer rates. */
+ __u8 ht_rates[16];
+ __u8 pad[16];
+
+ /* If set, interoperability mode, no proprietary extensions. */
+ __u8 interop;
+ __u8 pad2;
+ __u8 station_id;
+ __le16 amsdu_enabled;
+} __attribute__((packed));
+
struct mwl8k_cmd_update_stadb {
struct mwl8k_cmd_pkt header;
@@ -2645,12 +3031,19 @@ struct mwl8k_cmd_update_stadb {
struct peer_capability_info peer_info;
} __attribute__((packed));
-static int mwl8k_cmd_update_stadb(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, __u32 action)
+#define MWL8K_STA_DB_MODIFY_ENTRY 1
+#define MWL8K_STA_DB_DEL_ENTRY 2
+
+/* Peer Entry flags - used to define the type of the peer node */
+#define MWL8K_PEER_TYPE_ACCESSPOINT 2
+
+static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
- struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
struct mwl8k_cmd_update_stadb *cmd;
- struct peer_capability_info *peer_info;
+ struct peer_capability_info *p;
+ u32 rates;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -2659,37 +3052,47 @@ static int mwl8k_cmd_update_stadb(struct ieee80211_hw *hw,
cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_STA_DB_MODIFY_ENTRY);
+ memcpy(cmd->peer_addr, sta->addr, ETH_ALEN);
+
+ p = &cmd->peer_info;
+ p->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
+ p->basic_caps = cpu_to_le16(vif->bss_conf.assoc_capability);
+ p->ht_support = sta->ht_cap.ht_supported;
+ p->ht_caps = sta->ht_cap.cap;
+ p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) |
+ ((sta->ht_cap.ampdu_density & 7) << 2);
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
+ rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
+ else
+ rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+ legacy_rate_mask_to_array(p->legacy_rates, rates);
+ memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16);
+ p->interop = 1;
+ p->amsdu_enabled = 0;
- cmd->action = cpu_to_le32(action);
- peer_info = &cmd->peer_info;
- memcpy(cmd->peer_addr, mv_vif->bssid, ETH_ALEN);
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+ kfree(cmd);
- switch (action) {
- case MWL8K_STA_DB_ADD_ENTRY:
- case MWL8K_STA_DB_MODIFY_ENTRY:
- /* Build peer_info block */
- peer_info->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
- peer_info->basic_caps =
- cpu_to_le16(vif->bss_conf.assoc_capability);
- memcpy(peer_info->legacy_rates, mwl8k_rateids,
- sizeof(mwl8k_rateids));
- peer_info->interop = 1;
- peer_info->amsdu_enabled = 0;
-
- rc = mwl8k_post_cmd(hw, &cmd->header);
- if (rc == 0)
- mv_vif->peer_id = peer_info->station_id;
+ return rc ? rc : p->station_id;
+}
- break;
+static int mwl8k_cmd_update_stadb_del(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *addr)
+{
+ struct mwl8k_cmd_update_stadb *cmd;
+ int rc;
- case MWL8K_STA_DB_DEL_ENTRY:
- case MWL8K_STA_DB_FLUSH:
- default:
- rc = mwl8k_post_cmd(hw, &cmd->header);
- if (rc == 0)
- mv_vif->peer_id = 0;
- break;
- }
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_STA_DB_DEL_ENTRY);
+ memcpy(cmd->peer_addr, addr, ETH_ALEN);
+
+ rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
@@ -2706,19 +3109,22 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
u32 status;
status = ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
- iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
-
if (!status)
return IRQ_NONE;
- if (status & MWL8K_A2H_INT_TX_DONE)
- tasklet_schedule(&priv->tx_reclaim_task);
+ if (status & MWL8K_A2H_INT_TX_DONE) {
+ status &= ~MWL8K_A2H_INT_TX_DONE;
+ tasklet_schedule(&priv->poll_tx_task);
+ }
if (status & MWL8K_A2H_INT_RX_READY) {
- while (rxq_process(hw, 0, 1))
- rxq_refill(hw, 0, 1);
+ status &= ~MWL8K_A2H_INT_RX_READY;
+ tasklet_schedule(&priv->poll_rx_task);
}
+ if (status)
+ iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
+
if (status & MWL8K_A2H_INT_OPC_DONE) {
if (priv->hostcmd_wait != NULL)
complete(priv->hostcmd_wait);
@@ -2733,6 +3139,53 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void mwl8k_tx_poll(unsigned long data)
+{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ struct mwl8k_priv *priv = hw->priv;
+ int limit;
+ int i;
+
+ limit = 32;
+
+ spin_lock_bh(&priv->tx_lock);
+
+ for (i = 0; i < MWL8K_TX_QUEUES; i++)
+ limit -= mwl8k_txq_reclaim(hw, i, limit, 0);
+
+ if (!priv->pending_tx_pkts && priv->tx_wait != NULL) {
+ complete(priv->tx_wait);
+ priv->tx_wait = NULL;
+ }
+
+ spin_unlock_bh(&priv->tx_lock);
+
+ if (limit) {
+ writel(~MWL8K_A2H_INT_TX_DONE,
+ priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
+ } else {
+ tasklet_schedule(&priv->poll_tx_task);
+ }
+}
+
+static void mwl8k_rx_poll(unsigned long data)
+{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ struct mwl8k_priv *priv = hw->priv;
+ int limit;
+
+ limit = 32;
+ limit -= rxq_process(hw, 0, limit);
+ limit -= rxq_refill(hw, 0, limit);
+
+ if (limit) {
+ writel(~MWL8K_A2H_INT_RX_READY,
+ priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
+ } else {
+ tasklet_schedule(&priv->poll_rx_task);
+ }
+}
+
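
The two poll handlers above follow a bounded-work pattern: each tasklet pass handles at most a fixed budget of completions, acknowledges the interrupt when it finishes under budget, and reschedules itself otherwise so the remaining work is picked up on the next pass. A minimal user-space sketch of that control flow (the queue, the budget of 32 and the printf "re-arm"/"reschedule" actions are illustrative stand-ins, not driver API):

#include <stdbool.h>
#include <stdio.h>

#define BUDGET 32

static int pending = 100;		/* stand-in for queued completions */

static int reclaim(int limit)		/* process up to 'limit' items */
{
	int done = 0;

	while (pending > 0 && done < limit) {
		pending--;
		done++;
	}
	return done;
}

int main(void)
{
	bool rescheduled;

	do {
		int limit = BUDGET;

		limit -= reclaim(limit);

		if (limit) {
			/* Budget not exhausted: acknowledge/re-arm. */
			printf("idle, re-arming interrupt\n");
			rescheduled = false;
		} else {
			/* More work may remain: run the poll body again. */
			printf("budget exhausted, rescheduling\n");
			rescheduled = true;
		}
	} while (rescheduled);

	return 0;
}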
/*
* Core driver operations.
@@ -2743,7 +3196,7 @@ static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
int index = skb_get_queue_mapping(skb);
int rc;
- if (priv->current_channel == NULL) {
+ if (!priv->radio_on) {
printk(KERN_DEBUG "%s: dropped TX frame since radio "
"disabled\n", wiphy_name(hw->wiphy));
dev_kfree_skb(skb);
@@ -2768,8 +3221,9 @@ static int mwl8k_start(struct ieee80211_hw *hw)
return -EIO;
}
- /* Enable tx reclaim tasklet */
- tasklet_enable(&priv->tx_reclaim_task);
+ /* Enable TX reclaim and RX tasklets. */
+ tasklet_enable(&priv->poll_tx_task);
+ tasklet_enable(&priv->poll_rx_task);
/* Enable interrupts */
iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
@@ -2802,7 +3256,8 @@ static int mwl8k_start(struct ieee80211_hw *hw)
if (rc) {
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
free_irq(priv->pdev->irq, hw);
- tasklet_disable(&priv->tx_reclaim_task);
+ tasklet_disable(&priv->poll_tx_task);
+ tasklet_disable(&priv->poll_rx_task);
}
return rc;
@@ -2826,36 +3281,27 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
if (priv->beacon_skb != NULL)
dev_kfree_skb(priv->beacon_skb);
- /* Stop tx reclaim tasklet */
- tasklet_disable(&priv->tx_reclaim_task);
+ /* Stop TX reclaim and RX tasklets. */
+ tasklet_disable(&priv->poll_tx_task);
+ tasklet_disable(&priv->poll_rx_task);
/* Return all skbs to mac80211 */
for (i = 0; i < MWL8K_TX_QUEUES; i++)
- mwl8k_txq_reclaim(hw, i, 1);
+ mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
}
static int mwl8k_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_vif *mwl8k_vif;
-
- /*
- * We only support one active interface at a time.
- */
- if (priv->vif != NULL)
- return -EBUSY;
-
- /*
- * We only support managed interfaces for now.
- */
- if (vif->type != NL80211_IFTYPE_STATION)
- return -EINVAL;
+ u32 macids_supported;
+ int macid;
/*
* Reject interface creation if sniffer mode is active, as
* STA operation is mutually exclusive with hardware sniffer
- * mode.
+ * mode. (Sniffer mode is only used on STA firmware.)
*/
if (priv->sniffer_enabled) {
printk(KERN_INFO "%s: unable to create STA "
@@ -2864,19 +3310,37 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
return -EINVAL;
}
- /* Clean out driver private area */
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ macids_supported = priv->ap_macids_supported;
+ break;
+ case NL80211_IFTYPE_STATION:
+ macids_supported = priv->sta_macids_supported;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ macid = ffs(macids_supported & ~priv->macids_used);
+ if (!macid--)
+ return -EBUSY;
+
+ /* Setup driver private area. */
mwl8k_vif = MWL8K_VIF(vif);
memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
+ mwl8k_vif->vif = vif;
+ mwl8k_vif->macid = macid;
+ mwl8k_vif->seqno = 0;
- /* Set and save the mac address */
- mwl8k_cmd_set_mac_addr(hw, vif->addr);
- memcpy(mwl8k_vif->mac_addr, vif->addr, ETH_ALEN);
+ /* Set the mac address. */
+ mwl8k_cmd_set_mac_addr(hw, vif, vif->addr);
- /* Set Initial sequence number to zero */
- mwl8k_vif->seqno = 0;
+ if (priv->ap_fw)
+ mwl8k_cmd_set_new_stn_add_self(hw, vif);
- priv->vif = vif;
- priv->current_channel = NULL;
+ priv->macids_used |= 1 << mwl8k_vif->macid;
+ list_add_tail(&mwl8k_vif->list, &priv->vif_list);
return 0;
}
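
The interface-creation path above hands out MAC indices by taking the lowest set bit of supported & ~used; ffs() returns a 1-based bit position, so a return of 0 means no free slot and the driver bails out with -EBUSY. A compilable sketch of the same allocation, with made-up bitmask values:

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Allocate the lowest free index allowed by 'supported'; -1 if none. */
static int alloc_id(unsigned int supported, unsigned int *used)
{
	int id = ffs(supported & ~*used);

	if (id == 0)
		return -1;		/* all permitted slots in use */

	id--;				/* ffs() is 1-based */
	*used |= 1u << id;
	return id;
}

int main(void)
{
	unsigned int used = 0;
	unsigned int supported = 0x3;	/* two MAC slots, as an example */

	printf("%d\n", alloc_id(supported, &used));	/* 0 */
	printf("%d\n", alloc_id(supported, &used));	/* 1 */
	printf("%d\n", alloc_id(supported, &used));	/* -1: the -EBUSY case */
	return 0;
}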
@@ -2885,13 +3349,15 @@ static void mwl8k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
- if (priv->vif == NULL)
- return;
+ if (priv->ap_fw)
+ mwl8k_cmd_set_new_stn_del(hw, vif, vif->addr);
- mwl8k_cmd_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00");
+ mwl8k_cmd_set_mac_addr(hw, vif, "\x00\x00\x00\x00\x00\x00");
- priv->vif = NULL;
+ priv->macids_used &= ~(1 << mwl8k_vif->macid);
+ list_del(&mwl8k_vif->list);
}
static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
@@ -2902,7 +3368,6 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
if (conf->flags & IEEE80211_CONF_IDLE) {
mwl8k_cmd_radio_disable(hw);
- priv->current_channel = NULL;
return 0;
}
@@ -2914,12 +3379,10 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
if (rc)
goto out;
- rc = mwl8k_cmd_set_rf_channel(hw, conf->channel);
+ rc = mwl8k_cmd_set_rf_channel(hw, conf);
if (rc)
goto out;
- priv->current_channel = conf->channel;
-
if (conf->power_level > 18)
conf->power_level = 18;
rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
@@ -2940,75 +3403,160 @@ out:
return rc;
}
-static void mwl8k_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info,
- u32 changed)
+static void
+mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
{
struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+ u32 ap_legacy_rates;
+ u8 ap_mcs_rates[16];
int rc;
- if ((changed & BSS_CHANGED_ASSOC) == 0)
+ if (mwl8k_fw_lock(hw))
return;
- priv->capture_beacon = false;
-
- rc = mwl8k_fw_lock(hw);
- if (rc)
- return;
+ /*
+ * No need to capture a beacon if we're no longer associated.
+ */
+ if ((changed & BSS_CHANGED_ASSOC) && !vif->bss_conf.assoc)
+ priv->capture_beacon = false;
+ /*
+ * Get the AP's legacy and MCS rates.
+ */
if (vif->bss_conf.assoc) {
- memcpy(mwl8k_vif->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ struct ieee80211_sta *ap;
+
+ rcu_read_lock();
+
+ ap = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+ if (ap == NULL) {
+ rcu_read_unlock();
+ goto out;
+ }
+
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) {
+ ap_legacy_rates = ap->supp_rates[IEEE80211_BAND_2GHZ];
+ } else {
+ ap_legacy_rates =
+ ap->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+ }
+ memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
+
+ rcu_read_unlock();
+ }
- /* Install rates */
- rc = mwl8k_cmd_set_rate(hw, vif);
+ if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc) {
+ rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates);
if (rc)
goto out;
- /* Turn on rate adaptation */
- rc = mwl8k_cmd_use_fixed_rate(hw, MWL8K_USE_AUTO_RATE,
- MWL8K_UCAST_RATE, NULL);
+ rc = mwl8k_cmd_use_fixed_rate_sta(hw);
if (rc)
goto out;
+ }
- /* Set radio preamble */
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
rc = mwl8k_set_radio_preamble(hw,
vif->bss_conf.use_short_preamble);
if (rc)
goto out;
+ }
- /* Set slot time */
+ if (changed & BSS_CHANGED_ERP_SLOT) {
rc = mwl8k_cmd_set_slot(hw, vif->bss_conf.use_short_slot);
if (rc)
goto out;
+ }
- /* Update peer rate info */
- rc = mwl8k_cmd_update_stadb(hw, vif,
- MWL8K_STA_DB_MODIFY_ENTRY);
- if (rc)
- goto out;
-
- /* Set AID */
- rc = mwl8k_cmd_set_aid(hw, vif);
+ if (vif->bss_conf.assoc &&
+ (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_CTS_PROT |
+ BSS_CHANGED_HT))) {
+ rc = mwl8k_cmd_set_aid(hw, vif, ap_legacy_rates);
if (rc)
goto out;
+ }
+ if (vif->bss_conf.assoc &&
+ (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INT))) {
/*
* Finalize the join. Tell rx handler to process
* next beacon from our BSSID.
*/
- memcpy(priv->capture_bssid, mwl8k_vif->bssid, ETH_ALEN);
+ memcpy(priv->capture_bssid, vif->bss_conf.bssid, ETH_ALEN);
priv->capture_beacon = true;
- } else {
- rc = mwl8k_cmd_update_stadb(hw, vif, MWL8K_STA_DB_DEL_ENTRY);
- memset(mwl8k_vif->bssid, 0, ETH_ALEN);
}
out:
mwl8k_fw_unlock(hw);
}
+static void
+mwl8k_bss_info_changed_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ int rc;
+
+ if (mwl8k_fw_lock(hw))
+ return;
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ rc = mwl8k_set_radio_preamble(hw,
+ vif->bss_conf.use_short_preamble);
+ if (rc)
+ goto out;
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ int idx;
+ int rate;
+
+ /*
+ * Use lowest supported basic rate for multicasts
+ * and management frames (such as probe responses --
+ * beacons will always go out at 1 Mb/s).
+ */
+ idx = ffs(vif->bss_conf.basic_rates);
+ if (idx)
+ idx--;
+
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
+ rate = mwl8k_rates_24[idx].hw_value;
+ else
+ rate = mwl8k_rates_50[idx].hw_value;
+
+ mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
+ }
+
+ if (changed & (BSS_CHANGED_BEACON_INT | BSS_CHANGED_BEACON)) {
+ struct sk_buff *skb;
+
+ skb = ieee80211_beacon_get(hw, vif);
+ if (skb != NULL) {
+ mwl8k_cmd_set_beacon(hw, vif, skb->data, skb->len);
+ kfree_skb(skb);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ mwl8k_cmd_bss_start(hw, vif, info->enable_beacon);
+
+out:
+ mwl8k_fw_unlock(hw);
+}
+
+static void
+mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ if (!priv->ap_fw)
+ mwl8k_bss_info_changed_sta(hw, vif, info, changed);
+ else
+ mwl8k_bss_info_changed_ap(hw, vif, info, changed);
+}
+
static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
int mc_count, struct dev_addr_list *mclist)
{
@@ -3038,7 +3586,7 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
* operation, so refuse to enable sniffer mode if a STA
* interface is active.
*/
- if (priv->vif != NULL) {
+ if (!list_empty(&priv->vif_list)) {
if (net_ratelimit())
printk(KERN_INFO "%s: not enabling sniffer "
"mode because STA interface is active\n",
@@ -3059,6 +3607,14 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
return 1;
}
+static struct mwl8k_vif *mwl8k_first_vif(struct mwl8k_priv *priv)
+{
+ if (!list_empty(&priv->vif_list))
+ return list_entry(priv->vif_list.next, struct mwl8k_vif, list);
+
+ return NULL;
+}
+
static void mwl8k_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags,
@@ -3090,8 +3646,10 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
/* Clear unsupported feature flags */
*total_flags &= FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC;
- if (mwl8k_fw_lock(hw))
+ if (mwl8k_fw_lock(hw)) {
+ kfree(cmd);
return;
+ }
if (priv->sniffer_enabled) {
mwl8k_cmd_enable_sniffer(hw, 0);
@@ -3105,7 +3663,8 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
*/
mwl8k_cmd_set_pre_scan(hw);
} else {
- u8 *bssid;
+ struct mwl8k_vif *mwl8k_vif;
+ const u8 *bssid;
/*
* Enable the BSS filter.
@@ -3115,9 +3674,11 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
* (where the OUI part needs to be nonzero for
* the BSSID to be accepted by POST_SCAN).
*/
- bssid = "\x01\x00\x00\x00\x00\x00";
- if (priv->vif != NULL)
- bssid = MWL8K_VIF(priv->vif)->bssid;
+ mwl8k_vif = mwl8k_first_vif(priv);
+ if (mwl8k_vif != NULL)
+ bssid = mwl8k_vif->vif->bss_conf.bssid;
+ else
+ bssid = "\x01\x00\x00\x00\x00\x00";
mwl8k_cmd_set_post_scan(hw, bssid);
}
@@ -3144,7 +3705,93 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
static int mwl8k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
- return mwl8k_cmd_set_rts_threshold(hw, MWL8K_CMD_SET, value);
+ return mwl8k_cmd_set_rts_threshold(hw, value);
+}
+
+struct mwl8k_sta_notify_item
+{
+ struct list_head list;
+ struct ieee80211_vif *vif;
+ enum sta_notify_cmd cmd;
+ struct ieee80211_sta sta;
+};
+
+static void
+mwl8k_do_sta_notify(struct ieee80211_hw *hw, struct mwl8k_sta_notify_item *s)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ /*
+ * STA firmware uses UPDATE_STADB, AP firmware uses SET_NEW_STN.
+ */
+ if (!priv->ap_fw && s->cmd == STA_NOTIFY_ADD) {
+ int rc;
+
+ rc = mwl8k_cmd_update_stadb_add(hw, s->vif, &s->sta);
+ if (rc >= 0) {
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(s->vif, s->sta.addr);
+ if (sta != NULL)
+ MWL8K_STA(sta)->peer_id = rc;
+ rcu_read_unlock();
+ }
+ } else if (!priv->ap_fw && s->cmd == STA_NOTIFY_REMOVE) {
+ mwl8k_cmd_update_stadb_del(hw, s->vif, s->sta.addr);
+ } else if (priv->ap_fw && s->cmd == STA_NOTIFY_ADD) {
+ mwl8k_cmd_set_new_stn_add(hw, s->vif, &s->sta);
+ } else if (priv->ap_fw && s->cmd == STA_NOTIFY_REMOVE) {
+ mwl8k_cmd_set_new_stn_del(hw, s->vif, s->sta.addr);
+ }
+}
+
+static void mwl8k_sta_notify_worker(struct work_struct *work)
+{
+ struct mwl8k_priv *priv =
+ container_of(work, struct mwl8k_priv, sta_notify_worker);
+ struct ieee80211_hw *hw = priv->hw;
+
+ spin_lock_bh(&priv->sta_notify_list_lock);
+ while (!list_empty(&priv->sta_notify_list)) {
+ struct mwl8k_sta_notify_item *s;
+
+ s = list_entry(priv->sta_notify_list.next,
+ struct mwl8k_sta_notify_item, list);
+ list_del(&s->list);
+
+ spin_unlock_bh(&priv->sta_notify_list_lock);
+
+ mwl8k_do_sta_notify(hw, s);
+ kfree(s);
+
+ spin_lock_bh(&priv->sta_notify_list_lock);
+ }
+ spin_unlock_bh(&priv->sta_notify_list_lock);
+}
+
+static void
+mwl8k_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+{
+ struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_sta_notify_item *s;
+
+ if (cmd != STA_NOTIFY_ADD && cmd != STA_NOTIFY_REMOVE)
+ return;
+
+ s = kmalloc(sizeof(*s), GFP_ATOMIC);
+ if (s != NULL) {
+ s->vif = vif;
+ s->cmd = cmd;
+ s->sta = *sta;
+
+ spin_lock(&priv->sta_notify_list_lock);
+ list_add_tail(&s->list, &priv->sta_notify_list);
+ spin_unlock(&priv->sta_notify_list_lock);
+
+ ieee80211_queue_work(hw, &priv->sta_notify_worker);
+ }
}
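
mwl8k_sta_notify() above defers the actual firmware commands: apparently because the callback may run in atomic context, it only allocates a small record with GFP_ATOMIC, appends it to a spinlock-protected list and queues a work item, and the worker later drains the list and issues UPDATE_STADB or SET_NEW_STN. A single-threaded sketch of that enqueue-then-drain split (locking and the workqueue are omitted; the names and the MAC-address string are invented):

#include <stdio.h>
#include <stdlib.h>

/* A queued notification, mirroring the idea of mwl8k_sta_notify_item. */
struct notify_item {
	struct notify_item *next;
	int cmd;			/* 0 = add, 1 = remove */
	char addr[18];
};

static struct notify_item *head, **tail = &head;

/* "Atomic" side: only allocate and enqueue, never sleep. */
static int enqueue_notify(int cmd, const char *addr)
{
	struct notify_item *s = malloc(sizeof(*s));

	if (!s)
		return -1;
	s->next = NULL;
	s->cmd = cmd;
	snprintf(s->addr, sizeof(s->addr), "%s", addr);
	*tail = s;
	tail = &s->next;
	return 0;
}

/* Process-context side: drain the list and issue the slow commands. */
static void drain_notifies(void)
{
	while (head) {
		struct notify_item *s = head;

		head = s->next;
		if (!head)
			tail = &head;
		printf("%s station %s\n", s->cmd ? "remove" : "add", s->addr);
		free(s);
	}
}

int main(void)
{
	enqueue_notify(0, "00:11:22:33:44:55");
	enqueue_notify(1, "00:11:22:33:44:55");
	drain_notifies();
	return 0;
}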
static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -3195,6 +3842,22 @@ static int mwl8k_get_stats(struct ieee80211_hw *hw,
return mwl8k_cmd_get_stat(hw, stats);
}
+static int
+mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
+ return -ENOTSUPP;
+ return 0;
+ default:
+ return -ENOTSUPP;
+ }
+}
+
static const struct ieee80211_ops mwl8k_ops = {
.tx = mwl8k_tx,
.start = mwl8k_start,
@@ -3206,47 +3869,45 @@ static const struct ieee80211_ops mwl8k_ops = {
.prepare_multicast = mwl8k_prepare_multicast,
.configure_filter = mwl8k_configure_filter,
.set_rts_threshold = mwl8k_set_rts_threshold,
+ .sta_notify = mwl8k_sta_notify,
.conf_tx = mwl8k_conf_tx,
.get_tx_stats = mwl8k_get_tx_stats,
.get_stats = mwl8k_get_stats,
+ .ampdu_action = mwl8k_ampdu_action,
};
-static void mwl8k_tx_reclaim_handler(unsigned long data)
-{
- int i;
- struct ieee80211_hw *hw = (struct ieee80211_hw *) data;
- struct mwl8k_priv *priv = hw->priv;
-
- spin_lock_bh(&priv->tx_lock);
- for (i = 0; i < MWL8K_TX_QUEUES; i++)
- mwl8k_txq_reclaim(hw, i, 0);
-
- if (priv->tx_wait != NULL && !priv->pending_tx_pkts) {
- complete(priv->tx_wait);
- priv->tx_wait = NULL;
- }
- spin_unlock_bh(&priv->tx_lock);
-}
-
static void mwl8k_finalize_join_worker(struct work_struct *work)
{
struct mwl8k_priv *priv =
container_of(work, struct mwl8k_priv, finalize_join_worker);
struct sk_buff *skb = priv->beacon_skb;
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ int len = skb->len - offsetof(struct ieee80211_mgmt, u.beacon.variable);
+ const u8 *tim = cfg80211_find_ie(WLAN_EID_TIM,
+ mgmt->u.beacon.variable, len);
+ int dtim_period = 1;
- mwl8k_cmd_finalize_join(priv->hw, skb->data, skb->len,
- priv->vif->bss_conf.dtim_period);
- dev_kfree_skb(skb);
+ if (tim && tim[1] >= 2)
+ dtim_period = tim[3];
+ mwl8k_cmd_finalize_join(priv->hw, skb->data, skb->len, dtim_period);
+
+ dev_kfree_skb(skb);
priv->beacon_skb = NULL;
}
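
The reworked finalize-join worker pulls the DTIM period out of the captured beacon itself: the TIM information element (ID 5) carries DTIM count, DTIM period, bitmap control and then the bitmap, so tim[3] is the period once the length check passes, with a fallback of 1 when the element is missing or too short. A standalone sketch of walking the IE list and reading that byte (the sample buffer is invented):

#include <stddef.h>
#include <stdio.h>

#define WLAN_EID_TIM 5

/* Return a pointer to the IE with the given ID, or NULL (simple TLV walk). */
static const unsigned char *find_ie(unsigned char eid,
				    const unsigned char *ies, size_t len)
{
	while (len >= 2 && len >= 2u + ies[1]) {
		if (ies[0] == eid)
			return ies;
		len -= 2u + ies[1];
		ies += 2u + ies[1];
	}
	return NULL;
}

int main(void)
{
	/* SSID IE, then a TIM IE: count=0, period=3, bitmap ctl, bitmap */
	const unsigned char ies[] = { 0, 3, 'f', 'o', 'o',
				      WLAN_EID_TIM, 4, 0, 3, 0, 0 };
	const unsigned char *tim = find_ie(WLAN_EID_TIM, ies, sizeof(ies));
	int dtim_period = 1;		/* fall back to 1, like the driver */

	if (tim && tim[1] >= 2)
		dtim_period = tim[3];

	printf("dtim_period = %d\n", dtim_period);	/* prints 3 */
	return 0;
}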
enum {
- MWL8687 = 0,
+ MWL8363 = 0,
+ MWL8687,
MWL8366,
};
static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = {
+ [MWL8363] = {
+ .part_name = "88w8363",
+ .helper_image = "mwl8k/helper_8363.fw",
+ .fw_image = "mwl8k/fmimage_8363.fw",
+ },
[MWL8687] = {
.part_name = "88w8687",
.helper_image = "mwl8k/helper_8687.fw",
@@ -3260,10 +3921,20 @@ static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = {
},
};
+MODULE_FIRMWARE("mwl8k/helper_8363.fw");
+MODULE_FIRMWARE("mwl8k/fmimage_8363.fw");
+MODULE_FIRMWARE("mwl8k/helper_8687.fw");
+MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
+MODULE_FIRMWARE("mwl8k/helper_8366.fw");
+MODULE_FIRMWARE("mwl8k/fmimage_8366.fw");
+
static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
+ { PCI_VDEVICE(MARVELL, 0x2a0c), .driver_data = MWL8363, },
+ { PCI_VDEVICE(MARVELL, 0x2a24), .driver_data = MWL8363, },
{ PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, },
{ PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, },
{ PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, },
+ { PCI_VDEVICE(MARVELL, 0x2a43), .driver_data = MWL8366, },
{ },
};
MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table);
@@ -3361,27 +4032,23 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
mwl8k_release_firmware(priv);
- if (priv->ap_fw)
+ if (priv->ap_fw) {
priv->rxd_ops = priv->device_info->ap_rxd_ops;
- else
+ if (priv->rxd_ops == NULL) {
+ printk(KERN_ERR "%s: Driver does not have AP "
+ "firmware image support for this hardware\n",
+ wiphy_name(hw->wiphy));
+ goto err_stop_firmware;
+ }
+ } else {
priv->rxd_ops = &rxd_sta_ops;
+ }
priv->sniffer_enabled = false;
priv->wmm_enabled = false;
priv->pending_tx_pkts = 0;
- memcpy(priv->channels, mwl8k_channels, sizeof(mwl8k_channels));
- priv->band.band = IEEE80211_BAND_2GHZ;
- priv->band.channels = priv->channels;
- priv->band.n_channels = ARRAY_SIZE(mwl8k_channels);
- priv->band.bitrates = priv->rates;
- priv->band.n_bitrates = ARRAY_SIZE(mwl8k_rates);
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
-
- BUILD_BUG_ON(sizeof(priv->rates) != sizeof(mwl8k_rates));
- memcpy(priv->rates, mwl8k_rates, sizeof(mwl8k_rates));
-
/*
* Extra headroom is the size of the required DMA header
* minus the size of the smallest 802.11 frame (CTS frame).
@@ -3396,19 +4063,28 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
/* Set rssi and noise values to dBm */
hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM;
hw->vif_data_size = sizeof(struct mwl8k_vif);
- priv->vif = NULL;
+ hw->sta_data_size = sizeof(struct mwl8k_sta);
+
+ priv->macids_used = 0;
+ INIT_LIST_HEAD(&priv->vif_list);
/* Set default radio state and preamble */
priv->radio_on = 0;
priv->radio_short_preamble = 0;
+ /* Station database handling */
+ INIT_WORK(&priv->sta_notify_worker, mwl8k_sta_notify_worker);
+ spin_lock_init(&priv->sta_notify_list_lock);
+ INIT_LIST_HEAD(&priv->sta_notify_list);
+
/* Finalize join worker */
INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
- /* TX reclaim tasklet */
- tasklet_init(&priv->tx_reclaim_task,
- mwl8k_tx_reclaim_handler, (unsigned long)hw);
- tasklet_disable(&priv->tx_reclaim_task);
+ /* TX reclaim and RX tasklets. */
+ tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
+ tasklet_disable(&priv->poll_tx_task);
+ tasklet_init(&priv->poll_rx_task, mwl8k_rx_poll, (unsigned long)hw);
+ tasklet_disable(&priv->poll_rx_task);
/* Power management cookie */
priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma);
@@ -3437,7 +4113,8 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
- iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
+ iowrite32(MWL8K_A2H_INT_TX_DONE | MWL8K_A2H_INT_RX_READY,
+ priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
iowrite32(0xffffffff, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
@@ -3450,7 +4127,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
/*
* Temporarily enable interrupts. Initial firmware host
- * commands use interrupts and avoids polling. Disable
+ * commands use interrupts and avoid polling. Disable
* interrupts when done.
*/
iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
@@ -3462,8 +4139,6 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
rc = mwl8k_cmd_set_hw_spec(hw);
} else {
rc = mwl8k_cmd_get_hw_spec_sta(hw);
-
- hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
}
if (rc) {
printk(KERN_ERR "%s: Cannot initialise firmware\n",
@@ -3471,6 +4146,13 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
goto err_free_irq;
}
+ hw->wiphy->interface_modes = 0;
+ if (priv->ap_macids_supported)
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
+ if (priv->sta_macids_supported)
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
+
+
/* Turn radio off */
rc = mwl8k_cmd_radio_disable(hw);
if (rc) {
@@ -3479,7 +4161,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
}
/* Clear MAC address */
- rc = mwl8k_cmd_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00");
+ rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00");
if (rc) {
printk(KERN_ERR "%s: Cannot clear MAC address\n",
wiphy_name(hw->wiphy));
@@ -3494,7 +4176,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
if (rc) {
printk(KERN_ERR "%s: Cannot register device\n",
wiphy_name(hw->wiphy));
- goto err_free_irq;
+ goto err_free_queues;
}
printk(KERN_INFO "%s: %s v%d, %pM, %s firmware %u.%u.%u.%u\n",
@@ -3562,15 +4244,16 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
ieee80211_unregister_hw(hw);
- /* Remove tx reclaim tasklet */
- tasklet_kill(&priv->tx_reclaim_task);
+ /* Remove TX reclaim and RX tasklets. */
+ tasklet_kill(&priv->poll_tx_task);
+ tasklet_kill(&priv->poll_rx_task);
/* Stop hardware */
mwl8k_hw_reset(priv);
/* Return all skbs to mac80211 */
for (i = 0; i < MWL8K_TX_QUEUES; i++)
- mwl8k_txq_reclaim(hw, i, 1);
+ mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
for (i = 0; i < MWL8K_TX_QUEUES; i++)
mwl8k_txq_deinit(hw, i);
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index c13a4c383410..075f446b3139 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -274,7 +274,7 @@ static void __devexit orinoco_nortel_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id orinoco_nortel_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(orinoco_nortel_id_table) = {
/* Nortel emobility PCI */
{0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,},
/* Symbol LA-4123 PCI */
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index fea7781948e7..bda5317cc596 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -212,7 +212,7 @@ static void __devexit orinoco_pci_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id orinoco_pci_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(orinoco_pci_id_table) = {
/* Intersil Prism 3 */
{0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID,},
/* Intersil Prism 2.5 */
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index 3f2942a1e4f5..e0d5874ab42f 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -310,7 +310,7 @@ static void __devexit orinoco_plx_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id orinoco_plx_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(orinoco_plx_id_table) = {
{0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens SpeedStream SS1023 */
{0x1385, 0x4100, PCI_ANY_ID, PCI_ANY_ID,}, /* Netgear MA301 */
{0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index d3452548cc71..88cbc7902aa0 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -203,7 +203,7 @@ static void __devexit orinoco_tmd_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id orinoco_tmd_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(orinoco_tmd_id_table) = {
{0x15e8, 0x0131, PCI_ANY_ID, PCI_ANY_ID,}, /* NDC and OEMs, e.g. pheecom */
{0,},
};
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index a15962a19b2a..ed4bdffdd63e 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -31,7 +31,7 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("prism54pci");
MODULE_FIRMWARE("isl3886pci");
-static struct pci_device_id p54p_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(p54p_table) = {
/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
{ PCI_DEVICE(0x1260, 0x3890) },
/* 3COM 3CRWE154G72 Wireless LAN adapter */
@@ -157,6 +157,14 @@ static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
skb_tail_pointer(skb),
priv->common.rx_mtu + 32,
PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ dev_kfree_skb_any(skb);
+ dev_err(&priv->pdev->dev,
+ "RX DMA Mapping error\n");
+ break;
+ }
+
desc->host_addr = cpu_to_le32(mapping);
desc->device_addr = 0; // FIXME: necessary?
desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
@@ -197,6 +205,14 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
i %= ring_limit;
continue;
}
+
+ if (unlikely(len > priv->common.rx_mtu)) {
+ if (net_ratelimit())
+ dev_err(&priv->pdev->dev, "rx'd frame size "
+ "exceeds length threshold.\n");
+
+ len = priv->common.rx_mtu;
+ }
skb_put(skb, len);
if (p54_rx(dev, skb)) {
@@ -218,14 +234,14 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf);
}
-/* caller must hold priv->lock */
static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
int ring_index, struct p54p_desc *ring, u32 ring_limit,
- void **tx_buf)
+ struct sk_buff **tx_buf)
{
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
struct p54p_desc *desc;
+ struct sk_buff *skb;
u32 idx, i;
i = (*index) % ring_limit;
@@ -234,9 +250,8 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
while (i != idx) {
desc = &ring[i];
- if (tx_buf[i])
- if (FREE_AFTER_TX((struct sk_buff *) tx_buf[i]))
- p54_free_skb(dev, tx_buf[i]);
+
+ skb = tx_buf[i];
tx_buf[i] = NULL;
pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr),
@@ -247,17 +262,28 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
desc->len = 0;
desc->flags = 0;
+ if (skb && FREE_AFTER_TX(skb))
+ p54_free_skb(dev, skb);
+
i++;
i %= ring_limit;
}
}
-static void p54p_rx_tasklet(unsigned long dev_id)
+static void p54p_tasklet(unsigned long dev_id)
{
struct ieee80211_hw *dev = (struct ieee80211_hw *)dev_id;
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
+ p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt,
+ ARRAY_SIZE(ring_control->tx_mgmt),
+ priv->tx_buf_mgmt);
+
+ p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data,
+ ARRAY_SIZE(ring_control->tx_data),
+ priv->tx_buf_data);
+
p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt,
ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt);
@@ -272,59 +298,49 @@ static irqreturn_t p54p_interrupt(int irq, void *dev_id)
{
struct ieee80211_hw *dev = dev_id;
struct p54p_priv *priv = dev->priv;
- struct p54p_ring_control *ring_control = priv->ring_control;
__le32 reg;
- spin_lock(&priv->lock);
reg = P54P_READ(int_ident);
if (unlikely(reg == cpu_to_le32(0xFFFFFFFF))) {
- spin_unlock(&priv->lock);
- return IRQ_HANDLED;
+ goto out;
}
-
P54P_WRITE(int_ack, reg);
reg &= P54P_READ(int_enable);
- if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)) {
- p54p_check_tx_ring(dev, &priv->tx_idx_mgmt,
- 3, ring_control->tx_mgmt,
- ARRAY_SIZE(ring_control->tx_mgmt),
- priv->tx_buf_mgmt);
-
- p54p_check_tx_ring(dev, &priv->tx_idx_data,
- 1, ring_control->tx_data,
- ARRAY_SIZE(ring_control->tx_data),
- priv->tx_buf_data);
-
- tasklet_schedule(&priv->rx_tasklet);
-
- } else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
+ if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE))
+ tasklet_schedule(&priv->tasklet);
+ else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
complete(&priv->boot_comp);
- spin_unlock(&priv->lock);
-
+out:
return reg ? IRQ_HANDLED : IRQ_NONE;
}
static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
+ unsigned long flags;
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
- unsigned long flags;
struct p54p_desc *desc;
dma_addr_t mapping;
u32 device_idx, idx, i;
spin_lock_irqsave(&priv->lock, flags);
-
device_idx = le32_to_cpu(ring_control->device_idx[1]);
idx = le32_to_cpu(ring_control->host_idx[1]);
i = idx % ARRAY_SIZE(ring_control->tx_data);
- priv->tx_buf_data[i] = skb;
mapping = pci_map_single(priv->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ p54_free_skb(dev, skb);
+ dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
+ return;
+ }
+ priv->tx_buf_data[i] = skb;
+
desc = &ring_control->tx_data[i];
desc->host_addr = cpu_to_le32(mapping);
desc->device_addr = ((struct p54_hdr *)skb->data)->req_id;
@@ -346,14 +362,14 @@ static void p54p_stop(struct ieee80211_hw *dev)
unsigned int i;
struct p54p_desc *desc;
- tasklet_kill(&priv->rx_tasklet);
-
P54P_WRITE(int_enable, cpu_to_le32(0));
P54P_READ(int_enable);
udelay(10);
free_irq(priv->pdev->irq, dev);
+ tasklet_kill(&priv->tasklet);
+
P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) {
@@ -537,7 +553,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
priv->common.tx = p54p_tx;
spin_lock_init(&priv->lock);
- tasklet_init(&priv->rx_tasklet, p54p_rx_tasklet, (unsigned long)dev);
+ tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);
err = request_firmware(&priv->firmware, "isl3886pci",
&priv->pdev->dev);
diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h
index fbb683953fb2..2feead617a3b 100644
--- a/drivers/net/wireless/p54/p54pci.h
+++ b/drivers/net/wireless/p54/p54pci.h
@@ -92,7 +92,7 @@ struct p54p_priv {
struct p54_common common;
struct pci_dev *pdev;
struct p54p_csr __iomem *map;
- struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tasklet;
const struct firmware *firmware;
spinlock_t lock;
struct p54p_ring_control *ring_control;
@@ -101,8 +101,8 @@ struct p54p_priv {
u32 rx_idx_mgmt, tx_idx_mgmt;
struct sk_buff *rx_buf_data[8];
struct sk_buff *rx_buf_mgmt[4];
- void *tx_buf_data[32];
- void *tx_buf_mgmt[4];
+ struct sk_buff *tx_buf_data[32];
+ struct sk_buff *tx_buf_mgmt[4];
struct completion boot_comp;
};
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index e4f2bb7368f2..dc14420a9adc 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -39,7 +39,7 @@ module_param(init_pcitm, int, 0);
* driver_data
* If you have an update for this please contact prism54-devel@prism54.org
* The latest list can be found at http://prism54.org/supported_cards.php */
-static const struct pci_device_id prism54_id_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(prism54_id_tbl) = {
/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
{
0x1260, 0x3890,
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index d86d233c6810..aa579eb8723f 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1642,7 +1642,7 @@ static const struct rt2x00_ops rt2400pci_ops = {
/*
* RT2400pci module information.
*/
-static struct pci_device_id rt2400pci_device_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(rt2400pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x0101), PCI_DEVICE_DATA(&rt2400pci_ops) },
{ 0, }
};
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 46cbc6ef66ab..77ee1df7933f 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1940,7 +1940,7 @@ static const struct rt2x00_ops rt2500pci_ops = {
/*
* RT2500pci module information.
*/
-static struct pci_device_id rt2500pci_device_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(rt2500pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x0201), PCI_DEVICE_DATA(&rt2500pci_ops) },
{ 0, }
};
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 8ff7db853286..529a37364eb0 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -245,6 +245,25 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
}
EXPORT_SYMBOL_GPL(rt2800_mcu_request);
+int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
+{
+ unsigned int i;
+ u32 reg;
+
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+ if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
+ !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
+ return 0;
+
+ msleep(1);
+ }
+
+ ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
+ return -EACCES;
+}
+EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready);
+
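
The new shared rt2800_wait_wpdma_ready() replaces two identical copies in the PCI and USB back ends: it polls the WPDMA status a bounded number of times, sleeping 1 ms between reads, and fails if the busy bits never clear. The retry skeleton, reduced to portable C (the dma_busy() probe, the retry bound and the pretend hardware state are placeholders):

#include <stdio.h>
#include <unistd.h>	/* usleep() */

#define REGISTER_BUSY_COUNT 100		/* placeholder bound */

static int busy_left = 3;		/* pretend hardware: busy for 3 polls */

static int dma_busy(void)		/* stand-in for reading WPDMA_GLO_CFG */
{
	return busy_left-- > 0;
}

/* 0 when the engine went idle within the bound, negative on timeout. */
static int wait_dma_ready(void)
{
	unsigned int i;

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		if (!dma_busy())
			return 0;
		usleep(1000);		/* the msleep(1) equivalent */
	}
	return -1;
}

int main(void)
{
	printf("wait_dma_ready() = %d\n", wait_dma_ready());
	return 0;
}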
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
const struct rt2x00debug rt2800_rt2x00debug = {
.owner = THIS_MODULE,
@@ -339,7 +358,7 @@ static int rt2800_blink_set(struct led_classdev *led_cdev,
rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
- rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 12);
+ rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 3);
rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
rt2800_register_write(led->rt2x00dev, LED_CFG, reg);
@@ -347,7 +366,7 @@ static int rt2800_blink_set(struct led_classdev *led_cdev,
return 0;
}
-void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
+static void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
struct rt2x00_led *led, enum led_type type)
{
led->rt2x00dev = rt2x00dev;
@@ -356,7 +375,6 @@ void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
led->led_dev.blink_set = rt2800_blink_set;
led->flags = LED_INITIALIZED;
}
-EXPORT_SYMBOL_GPL(rt2800_init_led);
#endif /* CONFIG_RT2X00_LIB_LEDS */
/*
@@ -1862,7 +1880,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
!rt2x00_rf(rt2x00dev, RF3020) &&
!rt2x00_rf(rt2x00dev, RF2020) &&
!rt2x00_rf(rt2x00dev, RF3021) &&
- !rt2x00_rf(rt2x00dev, RF3022)) {
+ !rt2x00_rf(rt2x00dev, RF3022) &&
+ !rt2x00_rf(rt2x00dev, RF3052)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -2047,7 +2066,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rf(rt2x00dev, RF2820) ||
rt2x00_rf(rt2x00dev, RF2720) ||
- (rt2x00_intf_is_pci(rt2x00dev) && rt2x00_rf(rt2x00dev, RF3052))) {
+ rt2x00_rf(rt2x00dev, RF3052)) {
spec->num_channels = 14;
spec->channels = rf_vals;
} else if (rt2x00_rf(rt2x00dev, RF2850) || rt2x00_rf(rt2x00dev, RF2750)) {
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 535ce22f2ac8..ebabeae62d1b 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -114,8 +114,6 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
extern const struct rt2x00debug rt2800_rt2x00debug;
int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev);
-void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
- struct rt2x00_led *led, enum led_type type);
int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_crypto *crypto,
struct ieee80211_key_conf *key);
@@ -139,6 +137,7 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
int rt2800_init_registers(struct rt2x00_dev *rt2x00dev);
int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev);
int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev);
+int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev);
int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev);
void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index b93eabb4fbe1..d64181cbc9cb 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -453,24 +453,6 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
}
-static int rt2800pci_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
-{
- unsigned int i;
- u32 reg;
-
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
- if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
- !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
- return 0;
-
- msleep(1);
- }
-
- ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
- return -EACCES;
-}
-
static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
@@ -479,10 +461,10 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all registers.
*/
- if (unlikely(rt2800pci_wait_wpdma_ready(rt2x00dev) ||
+ if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800pci_init_queues(rt2x00dev) ||
rt2800_init_registers(rt2x00dev) ||
- rt2800pci_wait_wpdma_ready(rt2x00dev) ||
+ rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800_init_bbp(rt2x00dev) ||
rt2800_init_rfcsr(rt2x00dev)))
return -EIO;
@@ -562,7 +544,7 @@ static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
/* Wait for DMA, ignore error */
- rt2800pci_wait_wpdma_ready(rt2x00dev);
+ rt2800_wait_wpdma_ready(rt2x00dev);
}
static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
@@ -1208,7 +1190,7 @@ static const struct rt2x00_ops rt2800pci_ops = {
/*
* RT2800pci module information.
*/
-static struct pci_device_id rt2800pci_device_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 0510f020dcf5..82755cf8b73e 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -248,24 +248,6 @@ static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
}
-static int rt2800usb_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
-{
- unsigned int i;
- u32 reg;
-
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
- if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
- !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
- return 0;
-
- msleep(1);
- }
-
- ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
- return -EACCES;
-}
-
static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
@@ -274,7 +256,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all registers.
*/
- if (unlikely(rt2800usb_wait_wpdma_ready(rt2x00dev) ||
+ if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800_init_registers(rt2x00dev) ||
rt2800_init_bbp(rt2x00dev) ||
rt2800_init_rfcsr(rt2x00dev)))
@@ -344,7 +326,7 @@ static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
/* Wait for DMA, ignore error */
- rt2800usb_wait_wpdma_ready(rt2x00dev);
+ rt2800_wait_wpdma_ready(rt2x00dev);
rt2x00usb_disable_radio(rt2x00dev);
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index b4c6e0a6d7e0..096da85a66fa 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -119,6 +119,12 @@
( ((unsigned long)((__skb)->data + (__header))) & 3 )
/*
+ * Constants for extra TX headroom for alignment purposes.
+ */
+#define RT2X00_ALIGN_SIZE 4 /* Only whole frame needs alignment */
+#define RT2X00_L2PAD_SIZE 8 /* Both header & payload need alignment */
+
+/*
* Standard timing and size defines.
* These values should follow the ieee80211 specifications.
*/
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index d7711e4d4751..b93731b79903 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -688,7 +688,17 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* Initialize extra TX headroom required.
*/
- rt2x00dev->hw->extra_tx_headroom = rt2x00dev->ops->extra_tx_headroom;
+ rt2x00dev->hw->extra_tx_headroom =
+ max_t(unsigned int, IEEE80211_TX_STATUS_HEADROOM,
+ rt2x00dev->ops->extra_tx_headroom);
+
+ /*
+ * Take TX headroom required for alignment into account.
+ */
+ if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags))
+ rt2x00dev->hw->extra_tx_headroom += RT2X00_L2PAD_SIZE;
+ else if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
+ rt2x00dev->hw->extra_tx_headroom += RT2X00_ALIGN_SIZE;
/*
* Register HW.
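
The headroom hunk above reserves space for whichever is larger, the mac80211 status headroom or the driver's own descriptor, and then adds 8 bytes when header and payload are L2-padded separately or 4 bytes when only whole-frame DMA alignment is required. A small stand-alone version of that arithmetic (the flag arguments and sample values are illustrative):

#include <stdio.h>

#define ALIGN_SIZE 4	/* only the whole frame needs alignment */
#define L2PAD_SIZE 8	/* header and payload aligned separately */

static unsigned int tx_headroom(unsigned int status_headroom,
				unsigned int driver_headroom,
				int needs_l2pad, int needs_dma_align)
{
	unsigned int headroom = status_headroom > driver_headroom ?
				status_headroom : driver_headroom;

	if (needs_l2pad)
		headroom += L2PAD_SIZE;
	else if (needs_dma_align)
		headroom += ALIGN_SIZE;

	return headroom;
}

int main(void)
{
	/* e.g. 14 bytes of status headroom vs. a 24-byte TX descriptor */
	printf("%u\n", tx_headroom(14, 24, 1, 0));	/* 24 + 8 = 32 */
	printf("%u\n", tx_headroom(14, 24, 0, 1));	/* 24 + 4 = 28 */
	return 0;
}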
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 3d8fb684b4eb..0b4801a14601 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -104,7 +104,7 @@ void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
* is also mapped to the DMA so it can be used for transferring
* additional descriptor information to the hardware.
*/
- skb_push(skb, rt2x00dev->hw->extra_tx_headroom);
+ skb_push(skb, rt2x00dev->ops->extra_tx_headroom);
skbdesc->skb_dma =
dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
@@ -112,7 +112,7 @@ void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
/*
* Restore data pointer to original location again.
*/
- skb_pull(skb, rt2x00dev->hw->extra_tx_headroom);
+ skb_pull(skb, rt2x00dev->ops->extra_tx_headroom);
skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
@@ -134,7 +134,7 @@ void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
* by the driver, but it was actually mapped to DMA.
*/
dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
- skb->len + rt2x00dev->hw->extra_tx_headroom,
+ skb->len + rt2x00dev->ops->extra_tx_headroom,
DMA_TO_DEVICE);
skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
}
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index c353b497a65d..1f97a797bc41 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2807,7 +2807,7 @@ static const struct rt2x00_ops rt61pci_ops = {
/*
* RT61pci module information.
*/
-static struct pci_device_id rt61pci_device_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(rt61pci_device_table) = {
/* RT2561s */
{ PCI_DEVICE(0x1814, 0x0301), PCI_DEVICE_DATA(&rt61pci_ops) },
/* RT2561 v2 */
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 5a2b7199f5d5..2b928ecf47bd 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -33,7 +33,7 @@ MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver");
MODULE_LICENSE("GPL");
-static struct pci_device_id rtl8180_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(rtl8180_table) = {
/* rtl8185 */
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8185) },
{ PCI_DEVICE(PCI_VENDOR_ID_BELKIN, 0x700f) },
@@ -761,6 +761,14 @@ static void rtl8180_configure_filter(struct ieee80211_hw *dev,
rtl818x_iowrite32(priv, &priv->map->RX_CONF, priv->rx_conf);
}
+static u64 rtl8180_get_tsf(struct ieee80211_hw *dev)
+{
+ struct rtl8180_priv *priv = dev->priv;
+
+ return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
+ (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
+}
+
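
rtl8180 here (and rtl8187 further down) gains a get_tsf callback that stitches the 64-bit timer together from the two 32-bit TSFT register reads. The widening itself is just a cast and a shift; a sketch with made-up register values (a real implementation may also want to re-read if the low word wrapped between the two accesses):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the two TSFT register reads. */
static uint32_t read_tsf_lo(void) { return 0x89abcdefu; }
static uint32_t read_tsf_hi(void) { return 0x00000123u; }

static uint64_t get_tsf(void)
{
	/* Cast before shifting so the high word is not truncated. */
	return (uint64_t)read_tsf_hi() << 32 | read_tsf_lo();
}

int main(void)
{
	printf("tsf = 0x%016llx\n", (unsigned long long)get_tsf());
	return 0;
}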
static const struct ieee80211_ops rtl8180_ops = {
.tx = rtl8180_tx,
.start = rtl8180_start,
@@ -771,6 +779,7 @@ static const struct ieee80211_ops rtl8180_ops = {
.bss_info_changed = rtl8180_bss_info_changed,
.prepare_multicast = rtl8180_prepare_multicast,
.configure_filter = rtl8180_configure_filter,
+ .get_tsf = rtl8180_get_tsf,
};
static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom)
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index f336c63053c1..a05382557789 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -1265,6 +1265,14 @@ static int rtl8187_conf_tx(struct ieee80211_hw *dev, u16 queue,
return 0;
}
+static u64 rtl8187_get_tsf(struct ieee80211_hw *dev)
+{
+ struct rtl8187_priv *priv = dev->priv;
+
+ return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
+ (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
+}
+
static const struct ieee80211_ops rtl8187_ops = {
.tx = rtl8187_tx,
.start = rtl8187_start,
@@ -1276,7 +1284,8 @@ static const struct ieee80211_ops rtl8187_ops = {
.prepare_multicast = rtl8187_prepare_multicast,
.configure_filter = rtl8187_configure_filter,
.conf_tx = rtl8187_conf_tx,
- .rfkill_poll = rtl8187_rfkill_poll
+ .rfkill_poll = rtl8187_rfkill_poll,
+ .get_tsf = rtl8187_get_tsf,
};
static void rtl8187_eeprom_register_read(struct eeprom_93cx6 *eeprom)
diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h
index 6301578d1565..37c61c19cae5 100644
--- a/drivers/net/wireless/wl12xx/wl1251.h
+++ b/drivers/net/wireless/wl12xx/wl1251.h
@@ -341,9 +341,6 @@ struct wl1251 {
/* Are we currently scanning */
bool scanning;
- /* Our association ID */
- u16 aid;
-
/* Default key (for WEP) */
u32 default_key;
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c
index 770f260726bd..0320b478bb3f 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c
@@ -410,3 +410,86 @@ out:
kfree(cmd);
return ret;
}
+
+int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
+ struct ieee80211_channel *channels[],
+ unsigned int n_channels, unsigned int n_probes)
+{
+ struct wl1251_cmd_scan *cmd;
+ int i, ret = 0;
+
+ wl1251_debug(DEBUG_CMD, "cmd scan");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
+ cmd->params.rx_filter_options = cpu_to_le32(CFG_RX_PRSP_EN |
+ CFG_RX_MGMT_EN |
+ CFG_RX_BCN_EN);
+ cmd->params.scan_options = 0;
+ cmd->params.num_channels = n_channels;
+ cmd->params.num_probe_requests = n_probes;
+ cmd->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
+ cmd->params.tid_trigger = 0;
+
+ for (i = 0; i < n_channels; i++) {
+ cmd->channels[i].min_duration =
+ cpu_to_le32(WL1251_SCAN_MIN_DURATION);
+ cmd->channels[i].max_duration =
+ cpu_to_le32(WL1251_SCAN_MAX_DURATION);
+ memset(&cmd->channels[i].bssid_lsb, 0xff, 4);
+ memset(&cmd->channels[i].bssid_msb, 0xff, 2);
+ cmd->channels[i].early_termination = 0;
+ cmd->channels[i].tx_power_att = 0;
+ cmd->channels[i].channel = channels[i]->hw_value;
+ }
+
+ cmd->params.ssid_len = ssid_len;
+ if (ssid)
+ memcpy(cmd->params.ssid, ssid, ssid_len);
+
+ ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
+ if (ret < 0) {
+ wl1251_error("cmd scan failed: %d", ret);
+ goto out;
+ }
+
+ wl1251_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd));
+
+ if (cmd->header.status != CMD_STATUS_SUCCESS) {
+ wl1251_error("cmd scan status wasn't success: %d",
+ cmd->header.status);
+ ret = -EIO;
+ goto out;
+ }
+
+out:
+ kfree(cmd);
+ return ret;
+}
+
+int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout)
+{
+ struct wl1251_cmd_trigger_scan_to *cmd;
+ int ret;
+
+ wl1251_debug(DEBUG_CMD, "cmd trigger scan to");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->timeout = timeout;
+
+ ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
+ if (ret < 0) {
+ wl1251_error("cmd trigger scan to failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(cmd);
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.h b/drivers/net/wireless/wl12xx/wl1251_cmd.h
index dff798ad0ef5..4ad67cae94d2 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.h
@@ -27,6 +27,8 @@
#include "wl1251.h"
+#include <net/cfg80211.h>
+
struct acx_header;
int wl1251_cmd_send(struct wl1251 *wl, u16 type, void *buf, size_t buf_len);
@@ -43,6 +45,10 @@ int wl1251_cmd_read_memory(struct wl1251 *wl, u32 addr, void *answer,
size_t len);
int wl1251_cmd_template_set(struct wl1251 *wl, u16 cmd_id,
void *buf, size_t buf_len);
+int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
+ struct ieee80211_channel *channels[],
+ unsigned int n_channels, unsigned int n_probes);
+int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout);
/* unit ms */
#define WL1251_COMMAND_TIMEOUT 2000
@@ -163,8 +169,12 @@ struct cmd_read_write_memory {
#define CMDMBOX_HEADER_LEN 4
#define CMDMBOX_INFO_ELEM_HEADER_LEN 4
+#define WL1251_SCAN_MIN_DURATION 30000
+#define WL1251_SCAN_MAX_DURATION 60000
+
+#define WL1251_SCAN_NUM_PROBES 3
-struct basic_scan_parameters {
+struct wl1251_scan_parameters {
u32 rx_config_options;
u32 rx_filter_options;
@@ -189,11 +199,11 @@ struct basic_scan_parameters {
u8 tid_trigger;
u8 ssid_len;
- u32 ssid[8];
+ u8 ssid[32];
} __attribute__ ((packed));
-struct basic_scan_channel_parameters {
+struct wl1251_scan_ch_parameters {
u32 min_duration; /* in TU */
u32 max_duration; /* in TU */
u32 bssid_lsb;
@@ -213,11 +223,11 @@ struct basic_scan_channel_parameters {
/* SCAN parameters */
#define SCAN_MAX_NUM_OF_CHANNELS 16
-struct cmd_scan {
+struct wl1251_cmd_scan {
struct wl1251_cmd_header header;
- struct basic_scan_parameters params;
- struct basic_scan_channel_parameters channels[SCAN_MAX_NUM_OF_CHANNELS];
+ struct wl1251_scan_parameters params;
+ struct wl1251_scan_ch_parameters channels[SCAN_MAX_NUM_OF_CHANNELS];
} __attribute__ ((packed));
enum {
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 4e373f3dbc43..a717dde4822e 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -563,43 +563,25 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&wl->mutex);
}
-static int wl1251_build_null_data(struct wl1251 *wl)
+static int wl1251_build_qos_null_data(struct wl1251 *wl)
{
- struct wl12xx_null_data_template template;
+ struct ieee80211_qos_hdr template;
- if (!is_zero_ether_addr(wl->bssid)) {
- memcpy(template.header.da, wl->bssid, ETH_ALEN);
- memcpy(template.header.bssid, wl->bssid, ETH_ALEN);
- } else {
- memset(template.header.da, 0xff, ETH_ALEN);
- memset(template.header.bssid, 0xff, ETH_ALEN);
- }
+ memset(&template, 0, sizeof(template));
- memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
- template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
- IEEE80211_STYPE_NULLFUNC |
- IEEE80211_FCTL_TODS);
-
- return wl1251_cmd_template_set(wl, CMD_NULL_DATA, &template,
- sizeof(template));
+ memcpy(template.addr1, wl->bssid, ETH_ALEN);
+ memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
+ memcpy(template.addr3, wl->bssid, ETH_ALEN);
-}
-
-static int wl1251_build_ps_poll(struct wl1251 *wl, u16 aid)
-{
- struct wl12xx_ps_poll_template template;
+ template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_NULLFUNC |
+ IEEE80211_FCTL_TODS);
- memcpy(template.bssid, wl->bssid, ETH_ALEN);
- memcpy(template.ta, wl->mac_addr, ETH_ALEN);
+ /* FIXME: not sure what priority to use here */
+ template.qos_ctrl = cpu_to_le16(0);
- /* aid in PS-Poll has its two MSBs each set to 1 */
- template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid);
-
- template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
-
- return wl1251_cmd_template_set(wl, CMD_PS_POLL, &template,
+ return wl1251_cmd_template_set(wl, CMD_QOS_NULL_DATA, &template,
sizeof(template));
-
}
static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
@@ -635,10 +617,13 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
wl->psm_requested = true;
+ wl->dtim_period = conf->ps_dtim_period;
+
+ ret = wl1251_acx_wr_tbtt_and_dtim(wl, wl->beacon_int,
+ wl->dtim_period);
+
/*
- * We enter PSM only if we're already associated.
- * If we're not, we'll enter it when joining an SSID,
- * through the bss_info_changed() hook.
+ * mac80211 enables PSM only if we're already associated.
*/
ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
if (ret < 0)
@@ -870,199 +855,61 @@ out:
return ret;
}
-static int wl1251_build_basic_rates(char *rates)
-{
- u8 index = 0;
-
- rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
- rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
- rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
- rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
-
- return index;
-}
-
-static int wl1251_build_extended_rates(char *rates)
+static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
+ struct cfg80211_scan_request *req)
{
- u8 index = 0;
-
- rates[index++] = IEEE80211_OFDM_RATE_6MB;
- rates[index++] = IEEE80211_OFDM_RATE_9MB;
- rates[index++] = IEEE80211_OFDM_RATE_12MB;
- rates[index++] = IEEE80211_OFDM_RATE_18MB;
- rates[index++] = IEEE80211_OFDM_RATE_24MB;
- rates[index++] = IEEE80211_OFDM_RATE_36MB;
- rates[index++] = IEEE80211_OFDM_RATE_48MB;
- rates[index++] = IEEE80211_OFDM_RATE_54MB;
-
- return index;
-}
-
+ struct wl1251 *wl = hw->priv;
+ struct sk_buff *skb;
+ size_t ssid_len = 0;
+ u8 *ssid = NULL;
+ int ret;
-static int wl1251_build_probe_req(struct wl1251 *wl, u8 *ssid, size_t ssid_len)
-{
- struct wl12xx_probe_req_template template;
- struct wl12xx_ie_rates *rates;
- char *ptr;
- u16 size;
-
- ptr = (char *)&template;
- size = sizeof(struct ieee80211_header);
-
- memset(template.header.da, 0xff, ETH_ALEN);
- memset(template.header.bssid, 0xff, ETH_ALEN);
- memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
- template.header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
-
- /* IEs */
- /* SSID */
- template.ssid.header.id = WLAN_EID_SSID;
- template.ssid.header.len = ssid_len;
- if (ssid_len && ssid)
- memcpy(template.ssid.ssid, ssid, ssid_len);
- size += sizeof(struct wl12xx_ie_header) + ssid_len;
- ptr += size;
-
- /* Basic Rates */
- rates = (struct wl12xx_ie_rates *)ptr;
- rates->header.id = WLAN_EID_SUPP_RATES;
- rates->header.len = wl1251_build_basic_rates(rates->rates);
- size += sizeof(struct wl12xx_ie_header) + rates->header.len;
- ptr += sizeof(struct wl12xx_ie_header) + rates->header.len;
-
- /* Extended rates */
- rates = (struct wl12xx_ie_rates *)ptr;
- rates->header.id = WLAN_EID_EXT_SUPP_RATES;
- rates->header.len = wl1251_build_extended_rates(rates->rates);
- size += sizeof(struct wl12xx_ie_header) + rates->header.len;
-
- wl1251_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size);
-
- return wl1251_cmd_template_set(wl, CMD_PROBE_REQ, &template,
- size);
-}
+ wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan");
-static int wl1251_hw_scan(struct wl1251 *wl, u8 *ssid, size_t len,
- u8 active_scan, u8 high_prio, u8 num_channels,
- u8 probe_requests)
-{
- struct wl1251_cmd_trigger_scan_to *trigger = NULL;
- struct cmd_scan *params = NULL;
- int i, ret;
- u16 scan_options = 0;
-
- if (wl->scanning)
- return -EINVAL;
-
- params = kzalloc(sizeof(*params), GFP_KERNEL);
- if (!params)
- return -ENOMEM;
-
- params->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
- params->params.rx_filter_options =
- cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | CFG_RX_BCN_EN);
-
- /* High priority scan */
- if (!active_scan)
- scan_options |= SCAN_PASSIVE;
- if (high_prio)
- scan_options |= SCAN_PRIORITY_HIGH;
- params->params.scan_options = scan_options;
-
- params->params.num_channels = num_channels;
- params->params.num_probe_requests = probe_requests;
- params->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
- params->params.tid_trigger = 0;
-
- for (i = 0; i < num_channels; i++) {
- params->channels[i].min_duration = cpu_to_le32(30000);
- params->channels[i].max_duration = cpu_to_le32(60000);
- memset(&params->channels[i].bssid_lsb, 0xff, 4);
- memset(&params->channels[i].bssid_msb, 0xff, 2);
- params->channels[i].early_termination = 0;
- params->channels[i].tx_power_att = 0;
- params->channels[i].channel = i + 1;
- memset(params->channels[i].pad, 0, 3);
+ if (req->n_ssids) {
+ ssid = req->ssids[0].ssid;
+ ssid_len = req->ssids[0].ssid_len;
}
- for (i = num_channels; i < SCAN_MAX_NUM_OF_CHANNELS; i++)
- memset(&params->channels[i], 0,
- sizeof(struct basic_scan_channel_parameters));
-
- if (len && ssid) {
- params->params.ssid_len = len;
- memcpy(params->params.ssid, ssid, len);
- } else {
- params->params.ssid_len = 0;
- memset(params->params.ssid, 0, 32);
- }
+ mutex_lock(&wl->mutex);
- ret = wl1251_build_probe_req(wl, ssid, len);
- if (ret < 0) {
- wl1251_error("PROBE request template failed");
+ if (wl->scanning) {
+ wl1251_debug(DEBUG_SCAN, "scan already in progress");
+ ret = -EINVAL;
goto out;
}
- trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
- if (!trigger)
+ ret = wl1251_ps_elp_wakeup(wl);
+ if (ret < 0)
goto out;
- trigger->timeout = 0;
-
- ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger,
- sizeof(*trigger));
- if (ret < 0) {
- wl1251_error("trigger scan to failed for hw scan");
+ skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
+ req->ie, req->ie_len);
+ if (!skb) {
+ ret = -ENOMEM;
goto out;
}
- wl1251_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params));
-
- wl->scanning = true;
+ ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, skb->data,
+ skb->len);
+ dev_kfree_skb(skb);
+ if (ret < 0)
+ goto out_sleep;
- ret = wl1251_cmd_send(wl, CMD_SCAN, params, sizeof(*params));
+ ret = wl1251_cmd_trigger_scan_to(wl, 0);
if (ret < 0)
- wl1251_error("SCAN failed");
+ goto out_sleep;
- wl1251_mem_read(wl, wl->cmd_box_addr, params, sizeof(*params));
+ wl->scanning = true;
- if (params->header.status != CMD_STATUS_SUCCESS) {
- wl1251_error("TEST command answer error: %d",
- params->header.status);
+ ret = wl1251_cmd_scan(wl, ssid, ssid_len, req->channels,
+ req->n_channels, WL1251_SCAN_NUM_PROBES);
+ if (ret < 0) {
wl->scanning = false;
- ret = -EIO;
- goto out;
- }
-
-out:
- kfree(params);
- return ret;
-
-}
-
-static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
- struct cfg80211_scan_request *req)
-{
- struct wl1251 *wl = hw->priv;
- int ret;
- u8 *ssid = NULL;
- size_t ssid_len = 0;
-
- wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan");
-
- if (req->n_ssids) {
- ssid = req->ssids[0].ssid;
- ssid_len = req->ssids[0].ssid_len;
+ goto out_sleep;
}
- mutex_lock(&wl->mutex);
-
- ret = wl1251_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
-
- ret = wl1251_hw_scan(hw->priv, ssid, ssid_len, 1, 0, 13, 3);
-
+out_sleep:
wl1251_ps_elp_sleep(wl);
out:
@@ -1099,9 +946,8 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
- enum wl1251_cmd_ps_mode mode;
struct wl1251 *wl = hw->priv;
- struct sk_buff *beacon;
+ struct sk_buff *beacon, *skb;
int ret;
wl1251_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -1115,7 +961,17 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID) {
memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
- ret = wl1251_build_null_data(wl);
+ skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out_sleep;
+
+ ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA,
+ skb->data, skb->len);
+ dev_kfree_skb(skb);
+ if (ret < 0)
+ goto out_sleep;
+
+ ret = wl1251_build_qos_null_data(wl);
if (ret < 0)
goto out;
@@ -1130,27 +986,21 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
wl->beacon_int = bss_conf->beacon_int;
- wl->dtim_period = bss_conf->dtim_period;
- ret = wl1251_acx_wr_tbtt_and_dtim(wl, wl->beacon_int,
- wl->dtim_period);
- wl->aid = bss_conf->aid;
+ skb = ieee80211_pspoll_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out_sleep;
- ret = wl1251_build_ps_poll(wl, wl->aid);
+ ret = wl1251_cmd_template_set(wl, CMD_PS_POLL,
+ skb->data,
+ skb->len);
+ dev_kfree_skb(skb);
if (ret < 0)
goto out_sleep;
- ret = wl1251_acx_aid(wl, wl->aid);
+ ret = wl1251_acx_aid(wl, bss_conf->aid);
if (ret < 0)
goto out_sleep;
-
- /* If we want to go in PSM but we're not there yet */
- if (wl->psm_requested && !wl->psm) {
- mode = STATION_POWER_SAVE_MODE;
- ret = wl1251_ps_set_mode(wl, mode);
- if (ret < 0)
- goto out_sleep;
- }
} else {
/* use defaults when not associated */
wl->beacon_int = WL1251_DEFAULT_BEACON_INT;
@@ -1182,7 +1032,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
ret = wl1251_acx_cts_protect(wl, CTSPROTECT_DISABLE);
if (ret < 0) {
wl1251_warning("Set ctsprotect failed %d", ret);
- goto out;
+ goto out_sleep;
}
}
@@ -1193,7 +1043,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0) {
dev_kfree_skb(beacon);
- goto out;
+ goto out_sleep;
}
ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, beacon->data,
@@ -1202,13 +1052,13 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
dev_kfree_skb(beacon);
if (ret < 0)
- goto out;
+ goto out_sleep;
ret = wl1251_join(wl, wl->bss_type, wl->beacon_int,
wl->channel, wl->dtim_period);
if (ret < 0)
- goto out;
+ goto out_sleep;
}
out_sleep:
@@ -1282,6 +1132,7 @@ static struct ieee80211_channel wl1251_channels[] = {
static int wl1251_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
+ enum wl1251_acx_ps_scheme ps_scheme;
struct wl1251 *wl = hw->priv;
int ret;
@@ -1299,10 +1150,14 @@ static int wl1251_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
if (ret < 0)
goto out_sleep;
+ if (params->uapsd)
+ ps_scheme = WL1251_ACX_PS_SCHEME_UPSD_TRIGGER;
+ else
+ ps_scheme = WL1251_ACX_PS_SCHEME_LEGACY;
+
ret = wl1251_acx_tid_cfg(wl, wl1251_tx_get_queue(queue),
CHANNEL_TYPE_EDCF,
- wl1251_tx_get_queue(queue),
- WL1251_ACX_PS_SCHEME_LEGACY,
+ wl1251_tx_get_queue(queue), ps_scheme,
WL1251_ACX_ACK_POLICY_LEGACY);
if (ret < 0)
goto out_sleep;
@@ -1376,7 +1231,8 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_NOISE_DBM |
IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_BEACON_FILTER;
+ IEEE80211_HW_BEACON_FILTER |
+ IEEE80211_HW_SUPPORTS_UAPSD;
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
wl->hw->wiphy->max_scan_ssids = 1;
diff --git a/drivers/net/wireless/wl12xx/wl1271_reg.h b/drivers/net/wireless/wl12xx/wl1271_reg.h
index 1f237389d1c7..990960771528 100644
--- a/drivers/net/wireless/wl12xx/wl1271_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1271_reg.h
@@ -62,73 +62,10 @@
#define WL1271_SLV_REG_DATA (REGISTERS_BASE + 0x0008)
#define WL1271_SLV_REG_ADATA (REGISTERS_BASE + 0x000c)
#define WL1271_SLV_MEM_DATA (REGISTERS_BASE + 0x0018)
-/*
- * Interrupt registers.
- * 64 bit interrupt sources registers ws ced.
- * sme interrupts were removed and new ones were added.
- * Order was changed.
- */
-#define FIQ_MASK (REGISTERS_BASE + 0x0400)
-#define FIQ_MASK_L (REGISTERS_BASE + 0x0400)
-#define FIQ_MASK_H (REGISTERS_BASE + 0x0404)
-#define FIQ_MASK_SET (REGISTERS_BASE + 0x0408)
-#define FIQ_MASK_SET_L (REGISTERS_BASE + 0x0408)
-#define FIQ_MASK_SET_H (REGISTERS_BASE + 0x040C)
-#define FIQ_MASK_CLR (REGISTERS_BASE + 0x0410)
-#define FIQ_MASK_CLR_L (REGISTERS_BASE + 0x0410)
-#define FIQ_MASK_CLR_H (REGISTERS_BASE + 0x0414)
-#define IRQ_MASK (REGISTERS_BASE + 0x0418)
-#define IRQ_MASK_L (REGISTERS_BASE + 0x0418)
-#define IRQ_MASK_H (REGISTERS_BASE + 0x041C)
-#define IRQ_MASK_SET (REGISTERS_BASE + 0x0420)
-#define IRQ_MASK_SET_L (REGISTERS_BASE + 0x0420)
-#define IRQ_MASK_SET_H (REGISTERS_BASE + 0x0424)
-#define IRQ_MASK_CLR (REGISTERS_BASE + 0x0428)
-#define IRQ_MASK_CLR_L (REGISTERS_BASE + 0x0428)
-#define IRQ_MASK_CLR_H (REGISTERS_BASE + 0x042C)
-#define ECPU_MASK (REGISTERS_BASE + 0x0448)
-#define FIQ_STS_L (REGISTERS_BASE + 0x044C)
-#define FIQ_STS_H (REGISTERS_BASE + 0x0450)
-#define IRQ_STS_L (REGISTERS_BASE + 0x0454)
-#define IRQ_STS_H (REGISTERS_BASE + 0x0458)
-#define INT_STS_ND (REGISTERS_BASE + 0x0464)
-#define INT_STS_RAW_L (REGISTERS_BASE + 0x0464)
-#define INT_STS_RAW_H (REGISTERS_BASE + 0x0468)
-#define INT_STS_CLR (REGISTERS_BASE + 0x04B4)
-#define INT_STS_CLR_L (REGISTERS_BASE + 0x04B4)
-#define INT_STS_CLR_H (REGISTERS_BASE + 0x04B8)
-#define INT_ACK (REGISTERS_BASE + 0x046C)
-#define INT_ACK_L (REGISTERS_BASE + 0x046C)
-#define INT_ACK_H (REGISTERS_BASE + 0x0470)
-#define INT_TRIG (REGISTERS_BASE + 0x0474)
-#define INT_TRIG_L (REGISTERS_BASE + 0x0474)
-#define INT_TRIG_H (REGISTERS_BASE + 0x0478)
-#define HOST_STS_L (REGISTERS_BASE + 0x045C)
-#define HOST_STS_H (REGISTERS_BASE + 0x0460)
-#define HOST_MASK (REGISTERS_BASE + 0x0430)
-#define HOST_MASK_L (REGISTERS_BASE + 0x0430)
-#define HOST_MASK_H (REGISTERS_BASE + 0x0434)
-#define HOST_MASK_SET (REGISTERS_BASE + 0x0438)
-#define HOST_MASK_SET_L (REGISTERS_BASE + 0x0438)
-#define HOST_MASK_SET_H (REGISTERS_BASE + 0x043C)
-#define HOST_MASK_CLR (REGISTERS_BASE + 0x0440)
-#define HOST_MASK_CLR_L (REGISTERS_BASE + 0x0440)
-#define HOST_MASK_CLR_H (REGISTERS_BASE + 0x0444)
#define ACX_REG_INTERRUPT_TRIG (REGISTERS_BASE + 0x0474)
#define ACX_REG_INTERRUPT_TRIG_H (REGISTERS_BASE + 0x0478)
-/* Host Interrupts*/
-#define HINT_MASK (REGISTERS_BASE + 0x0494)
-#define HINT_MASK_SET (REGISTERS_BASE + 0x0498)
-#define HINT_MASK_CLR (REGISTERS_BASE + 0x049C)
-#define HINT_STS_ND_MASKED (REGISTERS_BASE + 0x04A0)
-/*1150 spec calls this HINT_STS_RAW*/
-#define HINT_STS_ND (REGISTERS_BASE + 0x04B0)
-#define HINT_STS_CLR (REGISTERS_BASE + 0x04A4)
-#define HINT_ACK (REGISTERS_BASE + 0x04A8)
-#define HINT_TRIG (REGISTERS_BASE + 0x04AC)
-
/*=============================================
Host Interrupt Mask Register - 32bit (RW)
------------------------------------------
@@ -433,16 +370,6 @@
/*===============================================
- Phy regs
- ===============================================*/
-#define ACX_PHY_ADDR_REG SBB_ADDR
-#define ACX_PHY_DATA_REG SBB_DATA
-#define ACX_PHY_CTRL_REG SBB_CTL
-#define ACX_PHY_REG_WR_MASK 0x00000001ul
-#define ACX_PHY_REG_RD_MASK 0x00000002ul
-
-
-/*===============================================
EEPROM Read/Write Request 32bit RW
------------------------------------------
1 EE_READ - EEPROM Read Request 1 - Setting this bit
@@ -511,28 +438,6 @@
#define ACX_CONT_WIND_MIN_MASK 0x0000007f
#define ACX_CONT_WIND_MAX 0x03ff0000
-/*
- * Indirect slave register/memory registers
- * ----------------------------------------
- */
-#define HW_SLAVE_REG_ADDR_REG 0x00000004
-#define HW_SLAVE_REG_DATA_REG 0x00000008
-#define HW_SLAVE_REG_CTRL_REG 0x0000000c
-
-#define SLAVE_AUTO_INC 0x00010000
-#define SLAVE_NO_AUTO_INC 0x00000000
-#define SLAVE_HOST_LITTLE_ENDIAN 0x00000000
-
-#define HW_SLAVE_MEM_ADDR_REG SLV_MEM_ADDR
-#define HW_SLAVE_MEM_DATA_REG SLV_MEM_DATA
-#define HW_SLAVE_MEM_CTRL_REG SLV_MEM_CTL
-#define HW_SLAVE_MEM_ENDIAN_REG SLV_END_CTL
-
-#define HW_FUNC_EVENT_INT_EN 0x8000
-#define HW_FUNC_EVENT_MASK_REG 0x00000034
-
-#define ACX_MAC_TIMESTAMP_REG (MAC_TIMESTAMP)
-
/*===============================================
HI_CFG Interface Configuration Register Values
------------------------------------------
@@ -647,10 +552,6 @@ b12-b0 - Supported Rate indicator bits as defined below.
******************************************************************************/
-#define TNETW1251_CHIP_ID_PG1_0 0x07010101
-#define TNETW1251_CHIP_ID_PG1_1 0x07020101
-#define TNETW1251_CHIP_ID_PG1_2 0x07030101
-
/*************************************************************************
Interrupt Trigger Register (Host -> WiLink)
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 02978a16e732..ee9564aa6ecc 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -397,8 +397,7 @@ u16 wl1271_top_reg_read(struct wl1271 *wl, int addr)
/* poll for data ready */
do {
val = wl1271_spi_read32(wl, OCP_DATA_READ);
- timeout--;
- } while (!(val & OCP_READY_MASK) && timeout);
+ } while (!(val & OCP_READY_MASK) && --timeout);
if (!timeout) {
wl1271_warning("Top register access timed out.");
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 709fe5e06f73..2d555cc30508 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -987,12 +987,13 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
changed_flags &= SUPPORTED_FIF_FLAGS;
*new_flags &= SUPPORTED_FIF_FLAGS;
- /* changed_flags is always populated but this driver
- * doesn't support all FIF flags so its possible we don't
- * need to do anything */
- if (!changed_flags)
- return;
-
+ /*
+ * If the multicast parameter (as returned by zd_op_prepare_multicast)
+ * has changed, no bit in changed_flags is set. To handle this
+ * situation, we do not return when changed_flags is 0; if we did,
+ * we would have issues with IPv6, which uses multicast for
+ * link-layer address resolution.
+ */
if (*new_flags & (FIF_PROMISC_IN_BSS | FIF_ALLMULTI))
zd_mc_add_all(&hash);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 4daf1c94ec04..442fc1117326 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -62,6 +62,7 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
/* ZD1211B */
{ USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x0409, 0x0248), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 0f773a9a3ff2..8b231b30fd12 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -237,7 +237,7 @@ static const struct pci_id_info pci_id_tbl[] = {
{ }
};
-static const struct pci_device_id yellowfin_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(yellowfin_pci_tbl) = {
{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ }